/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
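
/*
 * Merge iterator over the render and bsd active lists: returns the
 * object with the older last_rendering_seqno of the two list heads,
 * advancing that ring's cursor, or NULL once both lists are exhausted.
 */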
static struct drm_i915_gem_object *
i915_gem_next_active_object(struct drm_device *dev,
			    struct list_head **render_iter,
			    struct list_head **bsd_iter)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;

	if (*render_iter != &dev_priv->render_ring.active_list)
		render_obj = list_entry(*render_iter,
					struct drm_i915_gem_object,
					list);

	if (HAS_BSD(dev)) {
		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
			bsd_obj = list_entry(*bsd_iter,
					     struct drm_i915_gem_object,
					     list);

		if (render_obj == NULL) {
			*bsd_iter = (*bsd_iter)->next;
			return bsd_obj;
		}

		if (bsd_obj == NULL) {
			*render_iter = (*render_iter)->next;
			return render_obj;
		}

		/* XXX can we handle seqno wrapping? */
		if (render_obj->last_rendering_seqno <
		    bsd_obj->last_rendering_seqno) {
			*render_iter = (*render_iter)->next;
			return render_obj;
		} else {
			*bsd_iter = (*bsd_iter)->next;
			return bsd_obj;
		}
	} else {
		*render_iter = (*render_iter)->next;
		return render_obj;
	}
}
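
/*
 * Feeds the object into the current drm_mm eviction scan and tracks it
 * on the unwind list; returns true once drm_mm has found a hole large
 * enough to satisfy the pending request.
 */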
static bool
mark_free(struct drm_i915_gem_object *obj_priv,
	  struct list_head *unwind)
{
	list_add(&obj_priv->evict_list, unwind);
	return drm_mm_scan_add_block(obj_priv->gtt_space);
}
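
/*
 * Walks the objects of both rings' active lists in expected retirement
 * order. OBJ is the cursor object; R and B are struct list_head **
 * iterators for the render and bsd rings. Assumes dev and dev_priv are
 * in scope at the point of use.
 */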
#define i915_for_each_active_object(OBJ, R, B) \
	*(R) = dev_priv->render_ring.active_list.next; \
	*(B) = dev_priv->bsd_ring.active_list.next; \
	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
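
/*
 * Scans the aperture in LRU order for a contiguous range of at least
 * min_size bytes at the requested alignment, and evicts the objects
 * occupying it. Returns 0 on success, -ENOSPC if no suitable range
 * could be assembled, or the error from unbinding an object.
 */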
int
i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
	struct list_head *render_iter, *bsd_iter;
	int ret = 0;

	i915_gem_retire_requests(dev);

	/* Re-check for free space after retiring requests */
	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
			       min_size, alignment, 0))
		return 0;

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
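
	/*
	 * Note on the drm_mm scanning protocol used below: blocks are fed
	 * to drm_mm_scan_add_block() until it reports that a hole of
	 * sufficient size has been found, and every block so added must be
	 * taken out again with drm_mm_scan_remove_block() before any other
	 * drm_mm operation is permitted; removal reports whether the block
	 * lies inside the chosen hole and so must be evicted.
	 */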
	INIT_LIST_HEAD(&unwind_list);
	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
		/* Does the object require an outstanding flush? */
		if (obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Then add anything with a pending flush (in order of retirement) */
	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
		if (obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}
	/* Finally add the remaining dirty objects on the active lists */
	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
		if (!obj_priv->base.write_domain || obj_priv->pin_count)
			continue;

		if (mark_free(obj_priv, &unwind_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
		BUG_ON(ret);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
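	/* A suitable hole was found: unwind the scan, keeping only those
	 * objects that drm_mm places inside the hole. */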
	INIT_LIST_HEAD(&eviction_list);
	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
				 &unwind_list, evict_list) {
		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
			/* drm_mm doesn't allow any other operations while
			 * scanning, therefore store to-be-evicted objects on
			 * a temporary list. */
			list_move(&obj_priv->evict_list, &eviction_list);
		}
	}

	/* Unbinding will emit any required flushes */
	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
				 &eviction_list, evict_list) {
#if WATCH_LRU
		DRM_INFO("%s: evicting %p\n", __func__, obj_priv);
#endif
		ret = i915_gem_object_unbind(&obj_priv->base);
		if (ret)
			return ret;
	}

	/* The just created free hole should be on the top of the free stack
	 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
	 * Furthermore all accessed data has just recently been used, so it
	 * should be really fast, too. */
	BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
				   alignment, 0));

	return 0;
}
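
/*
 * Flushes all rendering and waits for the GPU to idle, then unbinds
 * everything left on the inactive list, emptying the aperture entirely.
 * Returns -ENOSPC if there was nothing to evict in the first place.
 */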
int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);

	if (lists_empty)
		return -ENOSPC;

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	ret = i915_gem_evict_inactive(dev);
	if (ret)
		return ret;

	spin_lock(&dev_priv->mm.active_list_lock);
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->render_ring.active_list) &&
		       (!HAS_BSD(dev)
			|| list_empty(&dev_priv->bsd_ring.active_list)));
	spin_unlock(&dev_priv->mm.active_list_lock);
	BUG_ON(!lists_empty);

	return 0;
}

/** Unbinds all inactive objects. */
int
i915_gem_evict_inactive(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	while (!list_empty(&dev_priv->mm.inactive_list)) {
		struct drm_gem_object *obj;
		int ret;

		obj = &list_first_entry(&dev_priv->mm.inactive_list,
					struct drm_i915_gem_object,
					list)->base;

		ret = i915_gem_object_unbind(obj);
		if (ret != 0) {
			DRM_ERROR("Error unbinding object: %d\n", ret);
			return ret;
		}
	}

	return 0;
}