/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
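
/**
 * radeon_get_ib_value - fetch a dword from a command stream IB
 * @p: parser structure holding the chunk list
 * @idx: dword index into the IB chunk
 *
 * Returns the dword at @idx, going through the two kmapped pages the
 * parser caches for the IB chunk and remapping on a cache miss.
 */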
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}
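
/**
 * radeon_ring_write - write a dword at the current ring position
 * @ring: radeon_ring structure holding ring information
 * @v: dword to write
 *
 * Writes @v at the write pointer, advances the write pointer with
 * wrap-around and updates the dword accounting set up by
 * radeon_ring_alloc() and radeon_ring_free_size().
 */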
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/*
 * IB.
 */
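
/**
 * radeon_ib_try_free - free an IB if its fence has signaled
 * @rdev: radeon_device pointer
 * @ib: IB to check
 *
 * Releases the fence and the suballocated buffer of an emitted IB once
 * the GPU has finished with it. Returns true if the IB was freed.
 */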
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	bool done = false;

	/* only free IBs which have been emitted */
	if (ib->fence && ib->fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo);
			done = true;
		}
	}
	return done;
}
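
/**
 * radeon_ib_get - request a free IB from the pool
 * @rdev: radeon_device pointer
 * @ring: index of the ring the IB will be submitted to
 * @ib: output pointer for the IB
 * @size: requested size of the command buffer, in bytes
 *
 * Creates a fence for the IB, then scans the pool for a slot whose
 * previous user has finished, retrying up to five times; as a last
 * resort it waits on the oldest outstanding fences.
 */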
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align size on 256 bytes */
	size = ALIGN(size, 256);

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an IB after 5 retries\n");
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
				(*ib)->fence = fence;
				(*ib)->vm_id = 0;
				(*ib)->is_const_ib = false;
				/* IBs are most likely to be allocated in a ring
				 * fashion, thus rdev->ib_pool.head_id should be
				 * the id of the oldest IB
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				radeon_mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* this should be a rare event, i.e. all IBs scheduled, none signaled yet */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		struct radeon_fence *fence = rdev->ib_pool.ibs[idx].fence;
		if (fence && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
			r = radeon_fence_wait(fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}
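
/**
 * radeon_ib_free - return an IB to the pool
 * @rdev: radeon_device pointer
 * @ib: IB to free, cleared on return
 *
 * If the IB was never emitted its buffer and fence are released
 * immediately; otherwise radeon_ib_try_free() reclaims it later.
 */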
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (tmp->fence && tmp->fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo);
		radeon_fence_unref(&tmp->fence);
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}
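
/**
 * radeon_ib_schedule - submit an IB to the hardware
 * @rdev: radeon_device pointer
 * @ib: IB to schedule
 *
 * Locks the target ring, asks the ASIC-specific code to execute the IB
 * and emits the IB's fence before committing the ring.
 */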
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the IB to execute; we should report that */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
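
/**
 * radeon_ib_pool_init - initialize the IB pool
 * @rdev: radeon_device pointer
 *
 * Sets up a GTT suballocator backing all RADEON_IB_POOL_SIZE IBs
 * (64KB each) and resets the per-IB bookkeeping. Safe to call twice;
 * a second call frees the temporary manager and returns 0.
 */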
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	struct radeon_sa_manager tmp;
	int i, r;

	r = radeon_sa_bo_manager_init(rdev, &tmp,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		return r;
	}

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		radeon_mutex_unlock(&rdev->ib_pool.mutex);
		radeon_sa_bo_manager_fini(rdev, &tmp);
		return 0;
	}

	rdev->ib_pool.sa_manager = tmp;
	INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}
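
/**
 * radeon_ib_pool_fini - tear down the IB pool
 * @rdev: radeon_device pointer
 *
 * Frees every suballocated IB buffer and fence, then destroys the
 * backing suballocator.
 */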
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	radeon_mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	radeon_mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}
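
/**
 * radeon_ib_ring_tests - run a simple IB test on every ready ring
 * @rdev: radeon_device pointer
 *
 * A failure on the GFX ring disables acceleration entirely; failures
 * on other rings just mark that ring as not ready.
 */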
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			ring->ready = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;
			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Ring.
 */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}
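
/**
 * radeon_ring_free_size - recompute the number of free dwords
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Refreshes the cached read pointer (from the writeback page when
 * enabled, otherwise from the register) and derives the free space
 * between rptr and wptr modulo the ring size.
 */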
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}
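
/**
 * radeon_ring_alloc - reserve space for @ndw dwords on a ring
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to reserve
 *
 * Rounds the request up to the ring's alignment and, while the ring is
 * too full, drops the ring lock and waits for the next fence to free
 * space. Must be called with rdev->ring_lock held.
 */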
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		mutex_unlock(&rdev->ring_lock);
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		mutex_lock(&rdev->ring_lock);
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}
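
/**
 * radeon_ring_lock - take the ring lock and reserve space
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @ndw: number of dwords to reserve
 *
 * Convenience wrapper around radeon_ring_alloc() that acquires
 * rdev->ring_lock first and releases it again on failure.
 */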
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&rdev->ring_lock);
		return r;
	}
	return 0;
}
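
/**
 * radeon_ring_commit - make the written dwords visible to the GPU
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Pads the ring with NOPs up to the fetch alignment, then publishes
 * the new write pointer to the hardware register.
 */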
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}
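
/**
 * radeon_ring_unlock_commit - commit written dwords, drop the ring lock
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Counterpart of radeon_ring_lock(). An illustrative sketch of the
 * usual pattern (the register and value here are hypothetical):
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(reg, 0));
 *	radeon_ring_write(ring, value);
 *	radeon_ring_unlock_commit(rdev, ring);
 */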
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&rdev->ring_lock);
}
void radeon_ring_undo(struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_undo(ring);
	mutex_unlock(&rdev->ring_lock);
}
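
/**
 * radeon_ring_force_activity - write a NOP if the ring looks idle
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Keeps the CP busy so that radeon_ring_test_lockup() can tell a hung
 * CP apart from one that simply has nothing to do.
 */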
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;

	mutex_lock(&rdev->ring_lock);
	radeon_ring_free_size(rdev, ring);
	if (ring->rptr == ring->wptr) {
		r = radeon_ring_alloc(rdev, ring, 1);
		if (!r) {
			radeon_ring_write(ring, ring->nop);
			radeon_ring_commit(rdev, ring);
		}
	}
	mutex_unlock(&rdev->ring_lock);
}
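
/**
 * radeon_ring_lockup_update - refresh the lockup tracking information
 * @ring: radeon_ring structure holding ring information
 *
 * Records the current read pointer and timestamp as the last point at
 * which the CP was known to make progress.
 */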
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
	ring->last_rptr = ring->rptr;
	ring->last_activity = jiffies;
}
/**
 * radeon_ring_test_lockup() - check whether a ring is locked up
 * @rdev: radeon device structure
 * @ring: radeon_ring structure holding ring information
 *
 * We don't need to initialize the lockup tracking information, since
 * either the CP rptr will have changed or jiffies will have wrapped
 * around, both of which force (re)initialization of the tracking
 * information.
 *
 * A possible false positive is being called after a long while with
 * last_rptr still equal to the current CP rptr; while unlikely, it can
 * happen. To avoid it, if the elapsed time since the last call exceeds
 * the lockup timeout we return false and update the tracking
 * information. As a consequence, the caller must call
 * radeon_ring_test_lockup() several times within the timeout window
 * for a lockup to be reported; the fencing code should take this into
 * account.
 *
 * The caller should also write to the ring to force the CP to do
 * something, so we don't get a false positive when the CP simply has
 * nothing to do.
 */
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned long cjiffies, elapsed;
	uint32_t rptr;

	cjiffies = jiffies;
	if (!time_after(cjiffies, ring->last_activity)) {
		/* likely a wrap around */
		radeon_ring_lockup_update(ring);
		return false;
	}
	rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	if (ring->rptr != ring->last_rptr) {
		/* CP is still working, no lockup */
		radeon_ring_lockup_update(ring);
		return false;
	}
	elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
	if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
		dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
		return true;
	}
	/* give a chance to the GPU ... */
	return false;
}
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
				  &ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				   (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	if (radeon_debugfs_ring_init(rdev, ring)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return 0;
}
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&rdev->ring_lock);
	ring_obj = ring->ring_obj;
	ring->ready = false;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&rdev->ring_lock);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}
static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_ib *ib = &rdev->ib_pool.ibs[*((unsigned*)node->info_ent->data)];
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
static unsigned radeon_debugfs_ib_idx[RADEON_IB_POOL_SIZE];
#endif
int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
		struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
		int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
		int r;

		if (&rdev->ring[ridx] != ring)
			continue;

		r = radeon_debugfs_add_files(rdev, info, 1);
		if (r)
			return r;
	}
#endif
	return 0;
}
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_idx[i] = i;
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &radeon_debugfs_ib_idx[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}