drm/radeon/kms: implement bo busy check + current domain
drivers/gpu/drm/radeon/radeon_object.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
struct radeon_object {
        struct ttm_buffer_object        tobj;
        struct list_head                list;
        struct radeon_device            *rdev;
        struct drm_gem_object           *gobj;
        struct ttm_bo_kmap_obj          kmap;
        unsigned                        pin_count;
        uint64_t                        gpu_addr;
        void                            *kptr;
        bool                            is_iomem;
        uint32_t                        tiling_flags;
        uint32_t                        pitch;
        int                             surface_reg;
};
int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * of the functions below call it.
 */
static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
{
        return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
}

static void radeon_object_unreserve(struct radeon_object *robj)
{
        ttm_bo_unreserve(&robj->tobj);
}
static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
{
        struct radeon_object *robj;

        robj = container_of(tobj, struct radeon_object, tobj);
        list_del_init(&robj->list);
        radeon_object_clear_surface_reg(robj);
        kfree(robj);
}
static inline void radeon_object_gpu_addr(struct radeon_object *robj)
{
        /* Default gpu address */
        robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
        if (robj->tobj.mem.mm_node == NULL) {
                return;
        }
        robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
        switch (robj->tobj.mem.mem_type) {
        case TTM_PL_VRAM:
                robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
                break;
        case TTM_PL_TT:
                robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
                robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
                return;
        }
}
static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
{
        uint32_t flags = 0;

        if (domain & RADEON_GEM_DOMAIN_VRAM) {
                flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
        }
        if (domain & RADEON_GEM_DOMAIN_GTT) {
                flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        }
        if (domain & RADEON_GEM_DOMAIN_CPU) {
                flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
        }
        if (!flags) {
                flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
        }
        return flags;
}
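/*
 * Illustrative sketch, not part of the original file: how the domain mask
 * above translates into TTM placement flags. example_domain_flags is a
 * hypothetical name used only for this demonstration.
 */
static void example_domain_flags(void)
{
        uint32_t flags;

        /* a VRAM request yields a write-combined, uncached placement */
        flags = radeon_object_flags_from_domain(RADEON_GEM_DOMAIN_VRAM);
        /* flags == TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED */

        /* an empty mask falls back to cacheable system memory */
        flags = radeon_object_flags_from_domain(0);
        /* flags == TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING */
        (void)flags;
}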
int radeon_object_create(struct radeon_device *rdev,
                         struct drm_gem_object *gobj,
                         unsigned long size,
                         bool kernel,
                         uint32_t domain,
                         bool interruptible,
                         struct radeon_object **robj_ptr)
{
        struct radeon_object *robj;
        enum ttm_bo_type type;
        uint32_t flags;
        int r;

        if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
                rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
        }
        if (kernel) {
                type = ttm_bo_type_kernel;
        } else {
                type = ttm_bo_type_device;
        }
        *robj_ptr = NULL;
        robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
        if (robj == NULL) {
                return -ENOMEM;
        }
        robj->rdev = rdev;
        robj->gobj = gobj;
        robj->surface_reg = -1;
        INIT_LIST_HEAD(&robj->list);

        flags = radeon_object_flags_from_domain(domain);
        r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
                                   0, 0, false, NULL, size,
                                   &radeon_ttm_object_object_destroy);
        if (unlikely(r != 0)) {
                /* on failure ttm calls radeon_ttm_object_object_destroy to free robj */
                DRM_ERROR("Failed to allocate TTM object (%lu, 0x%08X, %u)\n",
                          size, flags, 0);
                return r;
        }
        *robj_ptr = robj;
        if (gobj) {
                list_add_tail(&robj->list, &rdev->gem.objects);
        }
        return 0;
}
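/*
 * Illustrative sketch, not part of the original file: allocating a
 * kernel-owned BO with the function above. The name example_create_bo
 * and the 4096-byte size are assumptions made for the example.
 */
static int example_create_bo(struct radeon_device *rdev,
                             struct radeon_object **robj)
{
        /* kernel=true (no userspace mapping), gobj=NULL (not GEM-backed) */
        return radeon_object_create(rdev, NULL, 4096, true,
                                    RADEON_GEM_DOMAIN_GTT, true, robj);
}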
int radeon_object_kmap(struct radeon_object *robj, void **ptr)
{
        int r;

        spin_lock(&robj->tobj.lock);
        if (robj->kptr) {
                if (ptr) {
                        *ptr = robj->kptr;
                }
                spin_unlock(&robj->tobj.lock);
                return 0;
        }
        spin_unlock(&robj->tobj.lock);
        r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
        if (r) {
                return r;
        }
        spin_lock(&robj->tobj.lock);
        robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
        spin_unlock(&robj->tobj.lock);
        if (ptr) {
                *ptr = robj->kptr;
        }
        return 0;
}
void radeon_object_kunmap(struct radeon_object *robj)
{
        spin_lock(&robj->tobj.lock);
        if (robj->kptr == NULL) {
                spin_unlock(&robj->tobj.lock);
                return;
        }
        robj->kptr = NULL;
        spin_unlock(&robj->tobj.lock);
        ttm_bo_kunmap(&robj->kmap);
}
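/*
 * Illustrative sketch, not part of the original file: the CPU access
 * pattern built on the two helpers above. example_clear_bo is a
 * hypothetical name; a real caller would also honor robj->is_iomem
 * instead of assuming a plain kernel mapping.
 */
static int example_clear_bo(struct radeon_object *robj)
{
        void *ptr;
        int r;

        r = radeon_object_kmap(robj, &ptr);
        if (r) {
                return r;
        }
        memset(ptr, 0, robj->tobj.num_pages << PAGE_SHIFT);
        radeon_object_kunmap(robj);
        return 0;
}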
void radeon_object_unref(struct radeon_object **robj)
{
        struct ttm_buffer_object *tobj;

        if ((*robj) == NULL) {
                return;
        }
        tobj = &((*robj)->tobj);
        ttm_bo_unref(&tobj);
        if (tobj == NULL) {
                *robj = NULL;
        }
}
int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
{
        *offset = robj->tobj.addr_space_offset;
        return 0;
}
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
                      uint64_t *gpu_addr)
{
        uint32_t flags;
        uint32_t tmp;
        int r;

        flags = radeon_object_flags_from_domain(domain);
        spin_lock(&robj->tobj.lock);
        if (robj->pin_count) {
                robj->pin_count++;
                if (gpu_addr != NULL) {
                        *gpu_addr = robj->gpu_addr;
                }
                spin_unlock(&robj->tobj.lock);
                return 0;
        }
        spin_unlock(&robj->tobj.lock);
        r = radeon_object_reserve(robj, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for pinning.\n");
                return r;
        }
        tmp = robj->tobj.mem.placement;
        ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
        robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT |
                                        TTM_PL_MASK_CACHING;
        r = ttm_buffer_object_validate(&robj->tobj,
                                       robj->tobj.proposed_placement,
                                       false, false);
        radeon_object_gpu_addr(robj);
        if (gpu_addr != NULL) {
                *gpu_addr = robj->gpu_addr;
        }
        robj->pin_count = 1;
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to pin object.\n");
        }
        radeon_object_unreserve(robj);
        return r;
}
void radeon_object_unpin(struct radeon_object *robj)
{
        uint32_t flags;
        int r;

        spin_lock(&robj->tobj.lock);
        if (!robj->pin_count) {
                spin_unlock(&robj->tobj.lock);
                printk(KERN_WARNING "Unpin not necessary for %p!\n", robj);
                return;
        }
        robj->pin_count--;
        if (robj->pin_count) {
                spin_unlock(&robj->tobj.lock);
                return;
        }
        spin_unlock(&robj->tobj.lock);
        r = radeon_object_reserve(robj, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for unpinning.\n");
                return;
        }
        flags = robj->tobj.mem.placement;
        robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_buffer_object_validate(&robj->tobj,
                                       robj->tobj.proposed_placement,
                                       false, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to unpin buffer.\n");
        }
        radeon_object_unreserve(robj);
}
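/*
 * Illustrative sketch, not part of the original file: a pin/unpin cycle.
 * Pinning validates the BO into the requested domain with
 * TTM_PL_FLAG_NO_EVICT set, so gpu_addr stays valid until the matching
 * unpin. example_pin_cycle is a hypothetical name.
 */
static int example_pin_cycle(struct radeon_object *robj)
{
        uint64_t gpu_addr;
        int r;

        r = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
        if (r) {
                return r;
        }
        /* ... program the hardware with gpu_addr ... */
        radeon_object_unpin(robj);
        return 0;
}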
int radeon_object_wait(struct radeon_object *robj)
{
        int r = 0;

        /* FIXME: should use block reservation instead */
        r = radeon_object_reserve(robj, true);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for waiting.\n");
                return r;
        }
        spin_lock(&robj->tobj.lock);
        if (robj->tobj.sync_obj) {
                r = ttm_bo_wait(&robj->tobj, true, true, false);
        }
        spin_unlock(&robj->tobj.lock);
        radeon_object_unreserve(robj);
        return r;
}
int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
{
        int r = 0;

        r = radeon_object_reserve(robj, true);
        if (unlikely(r != 0)) {
                DRM_ERROR("radeon: failed to reserve object for busy check.\n");
                return r;
        }
        spin_lock(&robj->tobj.lock);
        *cur_placement = robj->tobj.mem.mem_type;
        if (robj->tobj.sync_obj) {
                /* no_wait=true: report busyness instead of blocking */
                r = ttm_bo_wait(&robj->tobj, true, true, true);
        }
        spin_unlock(&robj->tobj.lock);
        radeon_object_unreserve(robj);
        return r;
}
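/*
 * Illustrative sketch, not part of the original file: using the busy
 * check added by this change. With no_wait set, ttm_bo_wait() returns
 * -EBUSY while the GPU still holds the BO, and cur_placement reports
 * the current domain (TTM_PL_VRAM, TTM_PL_TT or TTM_PL_SYSTEM).
 * example_poll_bo is a hypothetical name.
 */
static int example_poll_bo(struct radeon_object *robj)
{
        uint32_t cur_placement;
        int r;

        r = radeon_object_busy_domain(robj, &cur_placement);
        if (r == -EBUSY) {
                /* still in flight; retry later or block in radeon_object_wait() */
                return r;
        }
        return r;
}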
int radeon_object_evict_vram(struct radeon_device *rdev)
{
        if (rdev->flags & RADEON_IS_IGP) {
                /* Useless to evict on IGP chips */
                return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
void radeon_object_force_delete(struct radeon_device *rdev)
{
        struct radeon_object *robj, *n;
        struct drm_gem_object *gobj;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        DRM_ERROR("Userspace still has active objects!\n");
        list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
                mutex_lock(&rdev->ddev->struct_mutex);
                gobj = robj->gobj;
                DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
                          gobj, robj, (unsigned long)gobj->size,
                          *((unsigned long *)&gobj->refcount));
                list_del_init(&robj->list);
                radeon_object_unref(&robj);
                gobj->driver_private = NULL;
                drm_gem_object_unreference(gobj);
                mutex_unlock(&rdev->ddev->struct_mutex);
        }
}
int radeon_object_init(struct radeon_device *rdev)
{
        return radeon_ttm_init(rdev);
}

void radeon_object_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
}
void radeon_object_list_add_object(struct radeon_object_list *lobj,
                                   struct list_head *head)
{
        /* write-domain BOs go to the head of the list, readers to the tail */
        if (lobj->wdomain) {
                list_add(&lobj->list, head);
        } else {
                list_add_tail(&lobj->list, head);
        }
}
int radeon_object_list_reserve(struct list_head *head)
{
        struct radeon_object_list *lobj;
        struct list_head *i;
        int r;

        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                /* pinned BOs cannot move, no need to reserve them */
                if (!lobj->robj->pin_count) {
                        r = radeon_object_reserve(lobj->robj, true);
                        if (unlikely(r != 0)) {
                                DRM_ERROR("radeon: failed to reserve object.\n");
                                return r;
                        }
                }
        }
        return 0;
}
void radeon_object_list_unreserve(struct list_head *head)
{
        struct radeon_object_list *lobj;
        struct list_head *i;

        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                /* only unreserve what radeon_object_list_reserve() reserved */
                if (!lobj->robj->pin_count) {
                        radeon_object_unreserve(lobj->robj);
                }
        }
}
int radeon_object_list_validate(struct list_head *head, void *fence)
{
        struct radeon_object_list *lobj;
        struct radeon_object *robj;
        struct radeon_fence *old_fence = NULL;
        struct list_head *i;
        int r;

        r = radeon_object_list_reserve(head);
        if (unlikely(r != 0)) {
                radeon_object_list_unreserve(head);
                return r;
        }
        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                robj = lobj->robj;
                if (!robj->pin_count) {
                        if (lobj->wdomain) {
                                robj->tobj.proposed_placement =
                                        radeon_object_flags_from_domain(lobj->wdomain);
                        } else {
                                robj->tobj.proposed_placement =
                                        radeon_object_flags_from_domain(lobj->rdomain);
                        }
                        r = ttm_buffer_object_validate(&robj->tobj,
                                                       robj->tobj.proposed_placement,
                                                       true, false);
                        if (unlikely(r)) {
                                DRM_ERROR("radeon: failed to validate.\n");
                                return r;
                        }
                        radeon_object_gpu_addr(robj);
                }
                lobj->gpu_offset = robj->gpu_addr;
                lobj->tiling_flags = robj->tiling_flags;
                if (fence) {
                        old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
                        robj->tobj.sync_obj = radeon_fence_ref(fence);
                        robj->tobj.sync_obj_arg = NULL;
                }
                if (old_fence) {
                        radeon_fence_unref(&old_fence);
                }
        }
        return 0;
}
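/*
 * Illustrative sketch, not part of the original file: how a command
 * submission path could drive the list helpers above for a single BO.
 * The lobj fields follow their usage in radeon_object_list_validate();
 * example_validate_one is a hypothetical name.
 */
static int example_validate_one(struct radeon_object_list *lobj,
                                struct radeon_fence *fence)
{
        struct list_head head;
        int r;

        INIT_LIST_HEAD(&head);
        radeon_object_list_add_object(lobj, &head);
        r = radeon_object_list_validate(&head, fence);
        if (r) {
                return r;
        }
        /* ... emit the command stream using lobj->gpu_offset ... */
        radeon_object_list_unreserve(&head);
        return 0;
}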
void radeon_object_list_unvalidate(struct list_head *head)
{
        struct radeon_object_list *lobj;
        struct radeon_fence *old_fence = NULL;
        struct list_head *i;

        list_for_each(i, head) {
                lobj = list_entry(i, struct radeon_object_list, list);
                old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
                lobj->robj->tobj.sync_obj = NULL;
                if (old_fence) {
                        radeon_fence_unref(&old_fence);
                }
        }
        radeon_object_list_unreserve(head);
}
void radeon_object_list_clean(struct list_head *head)
{
        radeon_object_list_unreserve(head);
}
int radeon_object_fbdev_mmap(struct radeon_object *robj,
                             struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &robj->tobj);
}
unsigned long radeon_object_size(struct radeon_object *robj)
{
        return robj->tobj.num_pages << PAGE_SHIFT;
}
int radeon_object_get_surface_reg(struct radeon_object *robj)
{
        struct radeon_device *rdev = robj->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_object *old_object;
        int steal;
        int i;

        if (!robj->tiling_flags)
                return 0;

        if (robj->surface_reg >= 0) {
                reg = &rdev->surface_regs[robj->surface_reg];
                i = robj->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
                reg = &rdev->surface_regs[i];
                if (!reg->robj)
                        break;

                old_object = reg->robj;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->robj;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tobj);
                old_object->surface_reg = -1;
                i = steal;
        }

        robj->surface_reg = i;
        reg->robj = robj;

out:
        radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
                               robj->tobj.mem.mm_node->start << PAGE_SHIFT,
                               robj->tobj.num_pages << PAGE_SHIFT);
        return 0;
}
void radeon_object_clear_surface_reg(struct radeon_object *robj)
{
        struct radeon_device *rdev = robj->rdev;
        struct radeon_surface_reg *reg;

        if (robj->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[robj->surface_reg];
        radeon_clear_surface_reg(rdev, robj->surface_reg);

        reg->robj = NULL;
        robj->surface_reg = -1;
}
void radeon_object_set_tiling_flags(struct radeon_object *robj,
                                    uint32_t tiling_flags, uint32_t pitch)
{
        robj->tiling_flags = tiling_flags;
        robj->pitch = pitch;
}

void radeon_object_get_tiling_flags(struct radeon_object *robj,
                                    uint32_t *tiling_flags,
                                    uint32_t *pitch)
{
        if (tiling_flags)
                *tiling_flags = robj->tiling_flags;
        if (pitch)
                *pitch = robj->pitch;
}
int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
                               bool force_drop)
{
        if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_object_clear_surface_reg(robj);
                return 0;
        }

        if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (robj->surface_reg >= 0)
                        radeon_object_clear_surface_reg(robj);
                return 0;
        }

        if ((robj->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_object_get_surface_reg(robj);
}
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);

        radeon_object_check_tiling(robj, 0, 1);
}

void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);

        radeon_object_check_tiling(robj, 0, 0);
}