/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_gem.h"
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
#ifdef DUMBBELL_WIP
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
#endif /* DUMBBELL_WIP */
                radeon_mn_unregister(robj);
                radeon_bo_unref(&robj);
        }
}
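
/*
 * Allocates the backing radeon_bo for a GEM object. The size is capped
 * at the unpinned GTT size (see below), and a failed VRAM allocation is
 * retried with GTT added to the allowed domains before giving up.
 */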
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
                             int alignment, int initial_domain,
                             u32 flags, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        /* Maximum bo size is the unpinned gtt size since we use the gtt to
         * handle vram to system pool migrations.
         */
        max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
        if (size > max_size) {
                DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
                          size >> 20, max_size >> 20);
                return -ENOMEM;
        }

retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
                             flags, NULL, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;
        robj->pid = curproc ? curproc->p_pid : 0;

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}
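
/*
 * Validate a BO into the requested domain. The write domain takes
 * precedence over the read domains; a CPU-domain request just waits
 * (up to 30s) for the BO to become idle.
 */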
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
                                 uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        long r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                pr_warn("Set domain without domain !\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access wait for object idle */
                r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
                if (!r)
                        r = -EBUSY;

                if (r < 0 && r != -EINTR) {
                        pr_err("Failed to wait for object: %li\n", r);
                        return r;
                }
        }
        if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
                /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
                return -EINVAL;
        }
        return 0;
}
int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}
/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
                return 0;
        }

        r = radeon_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        radeon_bo_unreserve(rbo);

        return 0;
}
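
/*
 * Drops this file's VM reference on the BO; the bo_va is removed from
 * the VM once its ref_count reaches zero.
 */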
void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if ((rdev->family < CHIP_CAYMAN) ||
            (!rdev->accel_working)) {
                return;
        }

        r = radeon_bo_reserve(rbo, true);
        if (r) {
                dev_err(rdev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
                return;
        }
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        radeon_vm_bo_rmv(rdev, bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}
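
/*
 * Common error filter for the ioctls below: a lockup indication from a
 * failed wait is handled by attempting a GPU reset before the error is
 * returned to userspace.
 */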
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}
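
/*
 * Reports the usable VRAM/GTT sizes to userspace, with currently
 * pinned memory subtracted from both.
 */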
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = (u64)man->size << PAGE_SHIFT;
        args->vram_visible = rdev->mc.visible_vram_size;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;
        return 0;
}
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, args->flags,
                                     false, &gobj);
        if (r) {
                if (r == -ERESTARTSYS)
                        r = -EINTR;
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;
}
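
/*
 * Wraps an anonymous user memory range in a GEM object. Read-only
 * mappings require at least an R600; writable mappings must combine
 * RADEON_GEM_USERPTR_ANONONLY and RADEON_GEM_USERPTR_REGISTER so that
 * an MMU notifier tracks the pages.
 */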
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* reject unknown flag values */
        if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
            RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
            RADEON_GEM_USERPTR_REGISTER))
                return -EINVAL;

        if (args->flags & RADEON_GEM_USERPTR_READONLY) {
                /* readonly pages not tested on older hardware */
                if (rdev->family < CHIP_R600)
                        return -EINVAL;

        } else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
                   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

                /* if we want to write to it we must require anonymous
                 * memory and install an MMU notifier */
                return -EACCES;
        }

        down_read(&rdev->exclusive_lock);

        /* create a gem object to contain this object in */
        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_CPU, 0,
                                     false, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_radeon_bo(gobj);
        r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
                r = radeon_mn_register(bo, args->addr);
                if (r)
                        goto release_object;
        }

        if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
                down_read(&current->mm->mmap_sem);
                r = radeon_bo_reserve(bo, true);
                if (r) {
                        up_read(&current->mm->mmap_sem);
                        goto release_object;
                }

                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
                radeon_bo_unreserve(bo);
                up_read(&current->mm->mmap_sem);
                if (r)
                        goto release_object;
        }

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;

release_object:
        drm_gem_object_put_unlocked(gobj);

handle_lockup:
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);

        return r;
}
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        down_read(&rdev->exclusive_lock);

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_put_unlocked(gobj);
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}
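
/*
 * Translates a GEM handle into the fake mmap offset used by TTM.
 * Userptr BOs are refused: their pages already live in the user's
 * address space.
 */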
int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
                drm_gem_object_put_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_put_unlocked(gobj);
        return 0;
}
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, (uint64_t *)&args->addr_ptr);
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
        if (r == 0)
                r = -EBUSY;
        else
                r = 0;

        cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
        args->domain = radeon_mem_type_to_domain(cur_placement);
        drm_gem_object_put_unlocked(gobj);
        return r;
}
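
/*
 * Blocks (up to 30s) until all fences on the BO have signaled, then
 * flushes the HDP cache via MMIO when the BO lives in VRAM so that CPU
 * reads see the GPU's writes.
 */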
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;
        uint32_t cur_placement = 0;
        long ret;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
        if (ret == 0)
                r = -EBUSY;
        else if (ret < 0)
                r = ret;

        /* Flush HDP cache via MMIO if necessary */
        cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
        drm_gem_object_put_unlocked(gobj);
        if (r == -ERESTARTSYS)
                r = -EINTR;
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d \n", args->handle);
        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_put_unlocked(gobj);
        return r;
}
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_put_unlocked(gobj);
        return r;
}
/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
                                    struct radeon_bo_va *bo_va)
{
        struct ttm_validate_buffer tv, *entry;
        struct radeon_bo_list *vm_bos;
        struct ww_acquire_ctx ticket;
        struct list_head list;
        unsigned domain;
        int r;

        INIT_LIST_HEAD(&list);

        tv.bo = &bo_va->bo->tbo;
        tv.shared = true;
        list_add(&tv.head, &list);

        vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
        if (!vm_bos)
                return;

        r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r)
                goto error_free;

        list_for_each_entry(entry, &list, head) {
                domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
                /* if anything is swapped out don't swap it in here,
                 * just abort and wait for the next CS */
                if (domain == RADEON_GEM_DOMAIN_CPU)
                        goto error_unreserve;
        }

        mutex_lock(&bo_va->vm->mutex);
        r = radeon_vm_clear_freed(rdev, bo_va->vm);
        if (r)
                goto error_unlock;

        if (bo_va->it.start)
                r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
        mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
        ttm_eu_backoff_reservation(&ticket, &list);

error_free:
        kvfree(vm_bos);

        if (r && r != -ERESTARTSYS)
                DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
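
/*
 * Map/unmap a BO in the per-file virtual address space. The request is
 * rejected unless vm_id is 0, the offset lies above the reserved area,
 * and none of the kernel-managed flags (VALID/SYSTEM) are set.
 */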
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* We don't support vm_id yet; to be sure we don't have broken
         * userspace, reject anyone trying to use a non-0 value, thus moving
         * forward we can use those fields without breaking existing userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to enforce userspace to set the snooped flag,
         * otherwise we will end up with broken userspace and we won't be able
         * to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_put_unlocked(gobj);
                return r;
        }
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
                radeon_bo_unreserve(rbo);
                drm_gem_object_put_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->it.start) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
                        radeon_bo_unreserve(rbo);
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        default:
                break;
        }
        if (!r) {
                radeon_gem_va_update_vm(rdev, bo_va);
                args->operation = RADEON_VA_RESULT_OK;
        } else {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        drm_gem_object_put_unlocked(gobj);
        return r;
}
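
/*
 * Get or set the initial placement domain recorded in a BO. Userptr
 * BOs are refused (-EPERM).
 */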
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = -EPERM;
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
                goto out;

        r = radeon_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
                args->value = robj->initial_domain;
                break;
        case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
                robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
                                                      RADEON_GEM_DOMAIN_GTT |
                                                      RADEON_GEM_DOMAIN_CPU);
                break;
        default:
                r = -EINVAL;
        }

        radeon_bo_unreserve(robj);
out:
        drm_gem_object_put_unlocked(gobj);
        return r;
}
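
/*
 * Dumb (unaccelerated scanout) buffer allocation. The pitch comes from
 * radeon_align_pitch() and the size is rounded up to a whole page; for
 * example, a 1920x1080 surface at 32 bpp needs a pitch of at least
 * 1920 * 4 = 7680 bytes, so at least 7680 * 1080 = 8294400 bytes of
 * VRAM after page alignment.
 */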
int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM, 0,
                                     false, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}
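
/*
 * debugfs: dump every GEM BO on the device with its size, current
 * placement and owning pid.
 */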
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_bo *rbo;
        unsigned i = 0;

        mutex_lock(&rdev->gem.mutex);
        list_for_each_entry(rbo, &rdev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
                switch (domain) {
                case RADEON_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case RADEON_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case RADEON_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&rdev->gem.mutex);
        return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
        {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
        return 0;
}