/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
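/* Each supported ASIC needs two command-processor microcode images: the
 * prefetch parser (PFP) and the micro engine (ME).  Both are fetched with
 * request_firmware() in r600_cp_init_microcode() below.
 */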
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* This file gathers functions specific to:
 * r600, rv610, rv630, rv620, rv635, rv670
 *
 * Some of these functions might be used by newer ASICs.
 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
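/*
 * R600 PCIE GART
 * The GART page table lives in a VRAM object; each entry is a 64-bit PTE
 * written with writeq(), and the VM TLB must be flushed whenever the table
 * is modified.
 */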
int r600_gart_clear_page(struct radeon_device *rdev, int i)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	u64 pte = 0;

	if (i < 0 || i > rdev->gart.num_gpu_pages)
		return -EINVAL;
	writeq(pte, ((void __iomem *)ptr) + (i * 8));
	return 0;
}
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		/* a response of 2 means the flush request failed */
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
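/* Enabling the GART programs the VM L2 cache, the per-client L1 TLB control
 * registers and the context0 page table range, then flushes the TLB and
 * marks the GART ready.
 */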
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
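/* Program the memory controller: stop the MC clients, wait for the MC to go
 * idle, set up the system aperture and FB location, then resume the clients
 * and shut off the VGA renderer.
 */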
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
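/* r600_mc_init() derives the VRAM bus width from RAMCFG, reads the VRAM size
 * from CONFIG_MEMSIZE, and places VRAM and GTT in the GPU address space so
 * that the system aperture can cover both.
 */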
int r600_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize;
	int r;

	/* Get VRAM information */
	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	if (rdev->family == CHIP_R600) {
		rdev->mc.vram_width = 8 * chansize;
	} else if (rdev->family == CHIP_RV670) {
		rdev->mc.vram_width = 4 * chansize;
	} else if ((rdev->family == CHIP_RV610) ||
			(rdev->family == CHIP_RV620)) {
		rdev->mc.vram_width = chansize;
	} else if ((rdev->family == CHIP_RV630) ||
			(rdev->family == CHIP_RV635)) {
		rdev->mc.vram_width = 2 * chansize;
	}
	/* Could the aperture size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);

	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
		rdev->mc.mc_vram_size = rdev->mc.aper_size;

	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
		rdev->mc.real_vram_size = rdev->mc.aper_size;

	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			return r;
		/* gtt_size is setup by radeon_agp_init */
		rdev->mc.gtt_location = rdev->mc.agp_base;
		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
		/* Try to put vram before or after AGP because we
		 * want SYSTEM_APERTURE to cover both VRAM and
		 * AGP so that GPU can catch out of VRAM/AGP access
		 */
		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
			/* Enough place before */
			rdev->mc.vram_location = rdev->mc.gtt_location -
							rdev->mc.mc_vram_size;
		} else if (tmp > rdev->mc.mc_vram_size) {
			/* Enough place after */
			rdev->mc.vram_location = rdev->mc.gtt_location +
							rdev->mc.gtt_size;
		} else {
			/* Try to setup VRAM then AGP; might not
			 * work on some cards
			 */
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
		}
	} else {
		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
							0xFFFF) << 24;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
			tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
				/* Enough place after vram */
				rdev->mc.gtt_location = tmp;
			} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
				/* Enough place before vram */
				rdev->mc.gtt_location = 0;
			} else {
				/* Not enough place after or before, shrink
				 * gart size
				 */
				if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
					rdev->mc.gtt_location = 0;
					rdev->mc.gtt_size = rdev->mc.vram_location;
				} else {
					rdev->mc.gtt_location = tmp;
					rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
				}
			}
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
		} else {
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		}
	}
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size;
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	return 0;
}
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset.  It's up to the caller to determine if the GPU needs one.  We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset (R_008010_GRBM_STATUS=0x%08X "
		"R_008014_GRBM_STATUS2=0x%08X)\n", RREG32(R_008010_GRBM_STATUS),
		RREG32(R_008014_GRBM_STATUS2));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering blocks are busy and reset them */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	dev_info(rdev->dev, "R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	/* After reset we need to reinit the asic as the GPU often ends up in
	 * an unusable state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_mc_resume(rdev, &save);
	return 0;
}

int r600_gpu_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
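/* Build the tile-pipe to render-backend mapping that goes into
 * GB_TILING_CONFIG: two bits per pipe select which enabled backend
 * services that pipe.
 */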
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	/* per-pipe-count swizzle_pipe[] setup not shown in this excerpt */
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
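/* r600_gpu_init() fills in the per-family limits in rdev->config.r600,
 * derives the tiling configuration from RAMCFG and the backend map, and
 * programs default SQ/SPI/DB/CB state for the 3D engine.
 */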
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config = 0;
	u32 ramcfg;
	u32 tmp;
	u32 sq_config;
	int i, j;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
						rdev->config.r600.max_backends,
						(0xff << rdev->config.r600.max_backends) & 0xff);
	tiling_config |= BACKEND_MAP(tmp);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
	WREG32(CC_RB_BACKEND_DISABLE, tmp);

	/* Setup pipes */
	tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);

	tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	}

	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	/* per-family adjustment of tmp not shown in this excerpt */
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	/* other family cases elided from this excerpt */
	default:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
/*
 * Indirect registers accessor
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
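/*
 * CP (command processor) & ring buffer
 */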
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}
int r600_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	size_t pfp_req_size, me_req_size;
	char fw_name[30];
	int err;

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600: chip_name = "R600"; break;
	case CHIP_RV610: chip_name = "RV610"; break;
	case CHIP_RV630: chip_name = "RV630"; break;
	case CHIP_RV620: chip_name = "RV620"; break;
	case CHIP_RV635: chip_name = "RV635"; break;
	case CHIP_RV670: chip_name = "RV670"; break;
	case CHIP_RS780:
	case CHIP_RS880: chip_name = "RS780"; break;
	case CHIP_RV770: chip_name = "RV770"; break;
	case CHIP_RV730:
	case CHIP_RV740: chip_name = "RV730"; break;
	case CHIP_RV710: chip_name = "RV710"; break;
	default: BUG();
	}

	if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
	}

	DRM_INFO("Loading %s CP Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
#ifdef __BIG_ENDIAN
	WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
		(drm_order(4096/8) << 8) | rb_bufsz);
#else
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
#endif
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	tmp = RREG32(CP_RB_CNTL);
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
/*
 * GPU scratch register helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
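/* Writeback: a small GTT buffer the CP writes scratch registers and the
 * ring read pointer back into; set up by r600_wb_enable() below.
 */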
void r600_wb_disable(struct radeon_device *rdev)
{
	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
	}
}
void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb_obj = NULL;
	}
}
int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, 4096, true,
				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
			return r;
		}
	}
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
	return 0;
}
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
}
int r600_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	/* FIXME: implement */
	return 0;
}
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	r600_blit_prepare_copy(rdev, num_pages * 4096);
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
	r600_blit_done_copy(rdev, fence);
	return 0;
}
int r600_irq_process(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

int r600_irq_set(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}
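/* r600_startup() brings the ASIC up: program the MC, enable the PCIE GART,
 * run r600_gpu_init(), pin the blit shader object, then start the CP ring
 * and enable writeback.
 */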
int r600_startup(struct radeon_device *rdev)
{
	int r;

	r600_mc_program(rdev);
	r = r600_pcie_gart_enable(rdev);
	if (r)
		return r;
	r600_gpu_init(rdev);

	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			      &rdev->r600_blit.shader_gpu_addr);
	if (r) {
		DRM_ERROR("failed to pin blit object %d\n", r);
		return r;
	}

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* the write back buffer is not vital so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}
int r600_resume(struct radeon_device *rdev)
{
	int r;

	if (r600_gpu_reset(rdev)) {
		/* FIXME: what do we want to do here ? */
	}
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r)
		return r;

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}
	return r;
}
int r600_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for the ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	radeon_object_unpin(rdev->r600_blit.shader_obj);
	return 0;
}
/* The plan is to move initialization into this function and use helper
 * functions so that radeon_device_init does pretty much nothing more
 * than calling ASIC-specific functions.  This should also allow removing
 * a bunch of callback functions from radeon_device.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = r600_mc_init(rdev);
	if (r) {
		if (rdev->flags & RADEON_IS_AGP) {
			/* Retry with disabling AGP */
			r600_fini(rdev);
			rdev->flags &= ~RADEON_IS_AGP;
			return r600_init(rdev);
		}
		return r;
	}
	/* Memory manager */
	r = radeon_object_init(rdev);
	if (r)
		return r;
	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	if (!rdev->me_fw || !rdev->pfp_fw) {
		r = r600_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_blit_init(rdev);
	if (r) {
		DRM_ERROR("radeon: failed blitter (%d).\n", r);
		return r;
	}

	r = r600_startup(rdev);
	if (r) {
		if (rdev->flags & RADEON_IS_AGP) {
			/* Retry with disabling AGP */
			r600_fini(rdev);
			rdev->flags &= ~RADEON_IS_AGP;
			return r600_init(rdev);
		}
		radeon_ib_pool_fini(rdev);
		radeon_ring_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}
	return 0;
}
void r600_fini(struct radeon_device *rdev)
{
	/* Suspend operations */
	r600_suspend(rdev);

	r600_blit_fini(rdev);
	radeon_ring_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	if (rdev->flags & RADEON_IS_AGP)
		radeon_agp_fini(rdev);
	radeon_object_fini(rdev);
	radeon_atombios_fini(rdev);
	radeon_dummy_page_fini(rdev);
}
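/* Indirect buffers are kicked off by emitting a PACKET3_INDIRECT_BUFFER
 * packet carrying the IB's GPU address and its length in dwords.
 */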
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}
int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}
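/*
 * Debugfs info
 */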
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	rdp = RREG32(CP_RB_RPTR);
	wdp = RREG32(CP_RB_WPTR);
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		i = (rdp + j) & rdev->cp.ptr_mask;
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}