drm/qxl: fix gaping memory hole
[linux-2.6/btrfs-unstable.git] / drivers / gpu / drm / qxl / qxl_release.c
blob 29ab4ec44c40569e4da1a8244b4ee4a8504c2879

/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "qxl_drv.h"
#include "qxl_object.h"
#include <trace/events/fence.h>

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256 byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releaseables */
/* stack them 16 high for now - drawable object is 191 */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
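
/*
 * The two tables below are indexed by release type: slot 0 is used for
 * QXL_RELEASE_DRAWABLE, slot 1 for QXL_RELEASE_SURFACE_CMD and slot 2 for
 * QXL_RELEASE_CURSOR_CMD (see the cur_idx mapping in
 * qxl_alloc_release_reserved()).
 */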
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct fence *fence)
{
	return "release";
}

static bool qxl_nop_signaling(struct fence *fence)
{
	/* fences are always automatically signaled, so just pretend we did this.. */
	return true;
}
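
/*
 * Custom wait: there is no interrupt to sleep on, so repeatedly poll the
 * fence, nudge the device with an OOM notify, and run the garbage collector
 * (up to 11 passes per iteration), backing off with usleep_range() and
 * warning after 300 spins on a drawable release.
 */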
static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (fence_is_signaled(fence))
			goto signaled;
	}

	if (fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			FENCE_WARN(fence, "failed to wait on release %d after spincount %d\n",
				   fence->context & ~0xf0000000, sc);
			goto signaled;
		}
		goto retry;
	}
	/*
	 * yeah, original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.enable_signaling = qxl_nop_signaling,
	.wait = qxl_fence_wait,
};
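
/*
 * Allocate a qxl_release and an idr handle for it. Returns the new handle
 * (always >= 1) on success, 0 if the release allocation fails, or a negative
 * errno if idr_alloc() fails; *ret is only valid for a positive handle.
 */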
static uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return 0;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	QXL_INFO(qdev, "allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
}
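
/*
 * Tear down a release: return its surface id (if any), remove it from the
 * idr, and drop the bo references taken by qxl_release_list_add(). A release
 * whose fence was initialised is signaled and has its final reference dropped
 * with fence_put(); one that was never fenced is simply kfree()d.
 */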
void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	QXL_INFO(qdev, "release %d, type %d\n", release->id,
		 release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		fence_signal(&release->base);
		fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	int ret;
	/* pin release bo's - they are too messy to evict */
	ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
			    QXL_GEM_DOMAIN_VRAM, NULL,
			    bo);
	return ret;
}
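
/*
 * Track a bo on the release's reservation list, taking a reference on it.
 * Adding the same bo twice is a no-op.
 */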
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}
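
/*
 * Make a reserved bo ready for command submission: place unpinned bos in
 * their TTM domain, reserve a shared fence slot on the reservation object,
 * and make sure the bo has a surface id allocated.
 */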
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	ret = reservation_object_reserve_shared(bo->tbo.resv);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if only one object is on the release list, it's the release itself;
	   since these objects are pinned, there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if only one object is on the release list, it's the release itself;
	   since these objects are pinned, there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));

		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);

		qxl_bo_unref(&bo);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}
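
/*
 * Allocate a release of the given type and carve out space for it in the
 * current suballocation bo for that type; a fresh page-sized pinned bo is
 * allocated whenever the current one has used all releases_per_bo[] slots.
 * On success the release id is written into the mapped release info.
 */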
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	qxl_release_list_add(*release, bo);

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	qxl_bo_unref(&bo);
	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}
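
/*
 * Atomically map the page of the first bo on the release list and return a
 * pointer to this release's qxl_release_info within it; undone by
 * qxl_release_unmap().
 */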
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_SIZE);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
	struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
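
/*
 * Publish the release's fence: initialise it on the device's release_lock,
 * attach it as a shared fence to every reserved bo, then put the bos back on
 * the LRU, drop their reservations and finish the ww acquire ticket.
 */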
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;
	struct qxl_bo *qbo;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if only one object is on the release list, it's the release itself;
	   since these objects are pinned, there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		   release->id | 0xf0000000, release->base.seqno);
	trace_fence_emit(&release->base);

	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;
		qbo = to_qxl_bo(bo);

		reservation_object_add_shared_fence(bo->resv, &release->base);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	ww_acquire_fini(&release->ticket);
}