/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"

u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
/*
 * DMA
 * Starting with R600, the GPU has an asynchronous
 * DMA engine.  The programming model is very similar
 * to the 3D engine (ring buffer, IBs, etc.), but the
 * DMA controller has its own packet format that is
 * different from the PM4 format used by the 3D engine.
 * It supports copying data, writing embedded data,
 * solid fills, and a number of other things.  It also
 * has support for tiling/detiling of buffers.
 */
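/*
 * For reference, DMA command headers are built with the DMA_PACKET()
 * macro from r600d.h, which packs the opcode into the top nibble and a
 * 16-bit count into the low bits of the header dword:
 *
 *   DMA_PACKET(cmd, t, s, n) = ((cmd & 0xF) << 28) | ((t & 0x1) << 23) |
 *                              ((s & 0x1) << 22) | ((n & 0xFFFF) << 0)
 *
 * e.g. DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1) is a write packet with a
 * one-dword payload count.
 */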
/**
 * r600_dma_get_rptr - get the current read pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current rptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(DMA_RB_RPTR);

	return (rptr & 0x3fffc) >> 2;
}
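/*
 * Note: the hardware ring pointers are byte offsets into the ring
 * buffer.  The 0x3fffc mask keeps the dword-aligned offset bits and
 * the >> 2 converts bytes to dwords, the unit struct radeon_ring
 * works in; r600_dma_set_wptr below applies the inverse << 2.
 */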
/**
 * r600_dma_get_wptr - get the current write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Get the current wptr from the hardware (r6xx+).
 */
uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
			   struct radeon_ring *ring)
{
	return (RREG32(DMA_RB_WPTR) & 0x3fffc) >> 2;
}
/**
 * r600_dma_set_wptr - commit the write pointer
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring pointer
 *
 * Write the wptr back to the hardware (r6xx+).
 */
void r600_dma_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(DMA_RB_WPTR, (ring->wptr << 2) & 0x3fffc);
}
/**
 * r600_dma_stop - stop the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine (r6xx-evergreen).
 */
void r600_dma_stop(struct radeon_device *rdev)
{
	u32 rb_cntl = RREG32(DMA_RB_CNTL);

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	rb_cntl &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, rb_cntl);

	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
}
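/*
 * While the DMA engine is down it cannot service buffer moves, so if
 * it is registered as the copy ring, the active VRAM size is clamped
 * to the CPU-visible aperture above, keeping CPU-driven fallback moves
 * within reachable memory.
 */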
/**
 * r600_dma_resume - setup and start the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA ring buffer and enable it (r6xx-evergreen).
 * Returns 0 for success, error for failure.
 */
int r600_dma_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
	u32 rb_cntl, dma_cntl, ib_cntl;
	u32 rb_bufsz;
	int r;

	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);

	/* Set ring buffer size in dwords */
	rb_bufsz = order_base_2(ring->ring_size / 4);
	rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
	rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
	WREG32(DMA_RB_CNTL, rb_cntl);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(DMA_RB_RPTR, 0);
	WREG32(DMA_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(DMA_RB_RPTR_ADDR_HI,
	       upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
	WREG32(DMA_RB_RPTR_ADDR_LO,
	       ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));

	if (rdev->wb.enabled)
		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;

	WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);

	/* enable DMA IBs */
	ib_cntl = DMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
	ib_cntl |= DMA_IB_SWAP_ENABLE;
#endif
	WREG32(DMA_IB_CNTL, ib_cntl);

	dma_cntl = RREG32(DMA_CNTL);
	dma_cntl &= ~CTXEMPTY_INT_ENABLE;
	WREG32(DMA_CNTL, dma_cntl);

	if (rdev->family >= CHIP_RV770)
		WREG32(DMA_MODE, 1);

	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);

	WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);

	ring->ready = true;

	r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == R600_RING_TYPE_DMA_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}
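/*
 * Sizing sketch: for a 64 KiB ring, ring_size / 4 = 16384 dwords and
 * order_base_2() yields 14, so rb_cntl = 14 << 1; judging from the
 * shift and the DMA_RB_ENABLE bit, the log2 buffer size field sits
 * just above the enable bit in DMA_RB_CNTL.
 */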
/**
 * r600_dma_fini - tear down the async dma engine
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engine and free the ring (r6xx-evergreen).
 */
void r600_dma_fini(struct radeon_device *rdev)
{
	r600_dma_stop(rdev);
	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
}
/**
 * r600_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
/**
 * r600_dma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (r6xx-SI).
 * Returns 0 for success, error for failure.
 */
int r600_dma_ring_test(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	unsigned i;
	int r;
	unsigned index;
	u32 tmp;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	tmp = 0xCAFEDEAD;
	rdev->wb.wb[index/4] = cpu_to_le32(tmp);

	r = radeon_ring_lock(rdev, ring, 4);
	if (r) {
		DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
		return r;
	}
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}
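/*
 * The test protocol above: seed the writeback slot with 0xCAFEDEAD,
 * ask the DMA engine to overwrite it with 0xDEADBEEF, then poll until
 * the value flips or rdev->usec_timeout microseconds elapse.
 */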
/**
 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed (r6xx-r7xx).
 */
void r600_dma_fence_ring_emit(struct radeon_device *rdev,
			      struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, lower_32_bits(fence->seq));
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
}
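/*
 * Addressing note: DMA packets carry GPU addresses as a dword-aligned
 * low word (addr & 0xfffffffc) plus 8 extra high bits
 * (upper_32_bits(addr) & 0xff), i.e. a 40-bit address space.
 */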
/**
 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (r6xx-SI).
 */
bool r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
				  struct radeon_ring *ring,
				  struct radeon_semaphore *semaphore,
				  bool emit_wait)
{
	u64 addr = semaphore->gpu_addr;
	u32 s = emit_wait ? 0 : 1;

	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, upper_32_bits(addr) & 0xff);

	return true;
}
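/*
 * Per the encoding above, the s bit in the semaphore packet header is
 * 0 for a wait and 1 for a signal; the following dwords carry the
 * semaphore's GPU address.
 */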
/**
 * r600_dma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (r6xx-SI).
 * Returns 0 on success, error on failure.
 */
int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		index = R600_WB_DMA_RING_TEST_OFFSET;
	else
		index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;

	gpu_addr = rdev->wb.gpu_addr + index;

	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;
	ib.length_dw = 4;

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies(
		RADEON_USEC_IB_TEST_TIMEOUT));
	if (r < 0) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	} else if (r == 0) {
		DRM_ERROR("radeon: fence wait timed out.\n");
		return -ETIMEDOUT;
	}
	r = 0;
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = le32_to_cpu(rdev->wb.wb[index/4]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	return r;
}
/**
 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (r6xx-r7xx).
 */
void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
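/*
 * Alignment sketch: the INDIRECT_BUFFER packet is 3 dwords, so padding
 * with NOPs until (wptr & 7) == 5 makes it occupy dwords 5, 6 and 7 of
 * an 8-dword group, ending the packet exactly on the 8 DW boundary the
 * comment above requires.
 */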
/**
 * r600_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (r6xx).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int r600_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset, uint64_t dst_offset,
		  unsigned num_gpu_pages,
		  struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFE)
			cur_size_in_dw = 0xFFFE;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
					 (upper_32_bits(src_offset) & 0xff)));
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}
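/*
 * Sizing sketch: each COPY packet moves at most 0xFFFE dwords (~256 KiB)
 * and costs 4 ring dwords, so the radeon_ring_lock() call reserves
 * num_loops * 4 dwords plus 8 dwords of headroom for the semaphore and
 * fence packets emitted around the copy.
 */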
);