/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
#include "evergreen_reg.h"
#include "evergreen_blit_shaders.h"
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
	u32 tmp;

	/* make sure flip is at vb rather than hb */
	tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);

	/* enable the pflip int */
	radeon_irq_kms_pflip_irq_get(rdev, crtc);
}
void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
{
	/* disable the pflip int */
	radeon_irq_kms_pflip_irq_put(rdev, crtc);
}
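/*
 * Program a page flip on the given crtc.  The sequence below follows the
 * hardware's double-buffering handshake: take the graphics update lock,
 * write the new scanout address to both the primary and secondary surface
 * registers, wait for the surface-update-pending bit to latch, then drop
 * the lock so the new address is applied during the vertical blank.
 */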
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);

	/* Lock the graphics update lock */
	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* update the scanout addresses */
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
	       upper_32_bits(crtc_base));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
	       (u32)crtc_base);

	/* Wait for update_pending to go high. */
	while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");

	/* Unlock the lock, so double-buffering can take place inside vblank */
	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);

	/* Return current update_pending status: */
	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
}
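/*
 * Note on the decode below: CG_MULT_THERMAL_STATUS holds the ASIC
 * temperature with a half-degree LSB; bits 10 and 9 appear to be
 * under-range/over-range flags that clamp the reading to 0 or 255 C
 * (an interpretation of the decode itself, not taken from documentation).
 */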
/* get temperature in millidegrees */
u32 evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	u32 actual_temp = 0;

	if ((temp >> 10) & 1)
		actual_temp = 0;
	else if ((temp >> 9) & 1)
		actual_temp = 255;
	else
		actual_temp = (temp >> 1) & 0xff;

	return actual_temp * 1000;
}
u32 sumo_get_temp(struct radeon_device *rdev)
{
	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
	u32 actual_temp = (temp >> 1) & 0xff;

	return actual_temp * 1000;
}
void evergreen_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
		}
	}
}
void evergreen_pm_prepare(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* disable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
void evergreen_pm_finish(struct radeon_device *rdev)
{
	struct drm_device *ddev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 tmp;

	/* enable any active CRTCs */
	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (radeon_crtc->enabled) {
			tmp = RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset);
			tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
			WREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset, tmp);
		}
	}
}
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	switch (hpd) {
	case RADEON_HPD_1:
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_2:
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_3:
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_4:
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_5:
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	case RADEON_HPD_6:
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
			connected = true;
		break;
	default:
		break;
	}

	return connected;
}
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
				enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = evergreen_hpd_sense(rdev, hpd);

	switch (hpd) {
	case RADEON_HPD_1:
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_2:
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_3:
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_4:
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_5:
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		break;
	case RADEON_HPD_6:
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		if (connected)
			tmp &= ~DC_HPDx_INT_POLARITY;
		else
			tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		break;
	default:
		break;
	}
}
void evergreen_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
		DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, tmp);
			rdev->irq.hpd[0] = true;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, tmp);
			rdev->irq.hpd[1] = true;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, tmp);
			rdev->irq.hpd[2] = true;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, tmp);
			rdev->irq.hpd[3] = true;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, tmp);
			rdev->irq.hpd[4] = true;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, tmp);
			rdev->irq.hpd[5] = true;
			break;
		default:
			break;
		}
	}
	if (rdev->irq.installed)
		evergreen_irq_set(rdev);
}
void evergreen_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		switch (radeon_connector->hpd.hpd) {
		case RADEON_HPD_1:
			WREG32(DC_HPD1_CONTROL, 0);
			rdev->irq.hpd[0] = false;
			break;
		case RADEON_HPD_2:
			WREG32(DC_HPD2_CONTROL, 0);
			rdev->irq.hpd[1] = false;
			break;
		case RADEON_HPD_3:
			WREG32(DC_HPD3_CONTROL, 0);
			rdev->irq.hpd[2] = false;
			break;
		case RADEON_HPD_4:
			WREG32(DC_HPD4_CONTROL, 0);
			rdev->irq.hpd[3] = false;
			break;
		case RADEON_HPD_5:
			WREG32(DC_HPD5_CONTROL, 0);
			rdev->irq.hpd[4] = false;
			break;
		case RADEON_HPD_6:
			WREG32(DC_HPD6_CONTROL, 0);
			rdev->irq.hpd[5] = false;
			break;
		default:
			break;
		}
	}
}
/* watermark setup */
static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
					struct radeon_crtc *radeon_crtc,
					struct drm_display_mode *mode,
					struct drm_display_mode *other_mode)
{
	u32 tmp = 0;
	/*
	 * Line Buffer Setup
	 * There are 3 line buffers, each one shared by 2 display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning is done via one of four
	 * preset allocations specified in bits 2:0:
	 * first display controller
	 *  0 - first half of lb (3840 * 2)
	 *  1 - first 3/4 of lb (5760 * 2)
	 *  2 - whole lb (7680 * 2)
	 *  3 - first 1/4 of lb (1920 * 2)
	 * second display controller
	 *  4 - second half of lb (3840 * 2)
	 *  5 - second 3/4 of lb (5760 * 2)
	 *  6 - whole lb (7680 * 2)
	 *  7 - last 1/4 of lb (1920 * 2)
	 */
	if (mode && other_mode) {
		if (mode->hdisplay > other_mode->hdisplay) {
			if (mode->hdisplay > 2560)
				tmp = 1; /* 3/4 lb */
			else
				tmp = 0; /* 1/2 lb */
		} else if (other_mode->hdisplay > mode->hdisplay) {
			if (other_mode->hdisplay > 2560)
				tmp = 3; /* 1/4 lb */
			else
				tmp = 0; /* 1/2 lb */
		} else
			tmp = 0; /* 1/2 lb */
	} else if (mode)
		tmp = 2; /* whole lb */
	else if (other_mode)
		tmp = 3; /* 1/4 lb */

	/* second controller of the pair uses second half of the lb */
	if (radeon_crtc->crtc_id % 2)
		tmp += 4;
	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);

	if (mode) {
		switch (tmp) {
		case 0:
		case 4:
		default:
			return 3840 * 2;
		case 1:
		case 5:
			return 5760 * 2;
		case 2:
		case 6:
			return 7680 * 2;
		case 3:
		case 7:
			return 1920 * 2;
		}
	}

	/* controller not enabled, so no lb used */
	return 0;
}
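/*
 * Each DRAM channel on these parts is 32 bits wide (inferred from the
 * dram_channels * 4 byte term in the bandwidth helpers below).
 */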
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
	u32 tmp = RREG32(MC_SHARED_CHMAP);

	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		return 1;
	case 1:
		return 2;
	case 2:
		return 4;
	case 3:
		return 8;
	}
}
struct evergreen_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk; /* bandwidth per dram data pin in kHz */
	u32 sclk; /* engine clock in kHz */
	u32 disp_clk; /* display clock in kHz */
	u32 src_width; /* viewport width */
	u32 active_time; /* active display time in ns */
	u32 blank_time; /* blank time in ns */
	bool interlaced; /* mode is interlaced */
	fixed20_12 vsc; /* vertical scale ratio */
	u32 num_heads; /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size; /* line buffer allocated to pipe */
	u32 vtaps; /* vertical scaler taps */
};
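/*
 * The helpers below do their math in the driver's fixed20_12 (20.12
 * fixed-point) type via dfixed_const()/dfixed_div()/dfixed_mul().  For
 * example, DRAM bandwidth works out to roughly
 *   yclk(kHz)/1000 * dram_channels * 4 bytes * 0.7 (efficiency)
 * in MB/s, with the 0.7 and 0.8 efficiency factors built as 7/10 and 8/10.
 */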
static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 dram_efficiency; /* 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	dram_efficiency.full = dfixed_const(7);
	dram_efficiency.full = dfixed_div(dram_efficiency, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	/* Calculate DRAM Bandwidth and the part allocated to display. */
	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
	fixed20_12 yclk, dram_channels, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	yclk.full = dfixed_const(wm->yclk);
	yclk.full = dfixed_div(yclk, a);
	dram_channels.full = dfixed_const(wm->dram_channels * 4);
	a.full = dfixed_const(10);
	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
	bandwidth.full = dfixed_mul(dram_channels, yclk);
	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display Data return Bandwidth */
	fixed20_12 return_efficiency; /* 0.8 */
	fixed20_12 sclk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	sclk.full = dfixed_const(wm->sclk);
	sclk.full = dfixed_div(sclk, a);
	a.full = dfixed_const(10);
	return_efficiency.full = dfixed_const(8);
	return_efficiency.full = dfixed_div(return_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, sclk);
	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the DMIF Request Bandwidth */
	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
	fixed20_12 disp_clk, bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	disp_clk.full = dfixed_const(wm->disp_clk);
	disp_clk.full = dfixed_div(disp_clk, a);
	a.full = dfixed_const(10);
	disp_clk_request_efficiency.full = dfixed_const(8);
	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
	a.full = dfixed_const(32);
	bandwidth.full = dfixed_mul(a, disp_clk);
	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);

	return dfixed_trunc(bandwidth);
}
static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);

	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
{
	/* Calculate the display mode Average Bandwidth
	 * DisplayMode should contain the source and destination dimensions,
	 * timing, etc.
	 */
	fixed20_12 bpp;
	fixed20_12 line_time;
	fixed20_12 src_width;
	fixed20_12 bandwidth;
	fixed20_12 a;

	a.full = dfixed_const(1000);
	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
	line_time.full = dfixed_div(line_time, a);
	bpp.full = dfixed_const(wm->bytes_per_pixel);
	src_width.full = dfixed_const(wm->src_width);
	bandwidth.full = dfixed_mul(src_width, bpp);
	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
	bandwidth.full = dfixed_div(bandwidth, line_time);

	return dfixed_trunc(bandwidth);
}
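/*
 * Worst-case latency, in ns, that a display request may see: memory
 * controller latency, plus the time for the other heads' outstanding chunk
 * and cursor requests to return, plus display pipe latency.  If the line
 * buffer cannot be refilled within a line's active time, the shortfall is
 * added on top.
 */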
static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
{
	/* First calculate the latency in ns */
	u32 mc_latency = 2000; /* 2000 ns. */
	u32 available_bandwidth = evergreen_available_bandwidth(wm);
	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
		(wm->num_heads * cursor_line_pair_return_time);
	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
	fixed20_12 a, b, c;

	if (wm->num_heads == 0)
		return 0;

	a.full = dfixed_const(2);
	b.full = dfixed_const(1);
	if ((wm->vsc.full > a.full) ||
	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
	    (wm->vtaps >= 5) ||
	    ((wm->vsc.full >= a.full) && wm->interlaced))
		max_src_lines_per_dst_line = 4;
	else
		max_src_lines_per_dst_line = 2;

	a.full = dfixed_const(available_bandwidth);
	b.full = dfixed_const(wm->num_heads);
	a.full = dfixed_div(a, b);

	b.full = dfixed_const(1000);
	c.full = dfixed_const(wm->disp_clk);
	b.full = dfixed_div(c, b);
	c.full = dfixed_const(wm->bytes_per_pixel);
	b.full = dfixed_mul(b, c);

	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));

	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
	b.full = dfixed_const(1000);
	c.full = dfixed_const(lb_fill_bw);
	b.full = dfixed_div(c, b);
	a.full = dfixed_div(a, b);
	line_fill_time = dfixed_trunc(a);

	if (line_fill_time < wm->active_time)
		return latency;
	else
		return latency + (line_fill_time - wm->active_time);
}
static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
		return true;
	else
		return false;
}
static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
{
	if (evergreen_average_bandwidth(wm) <=
	    (evergreen_available_bandwidth(wm) / wm->num_heads))
		return true;
	else
		return false;
}
static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
{
	u32 lb_partitions = wm->lb_size / wm->src_width;
	u32 line_time = wm->active_time + wm->blank_time;
	u32 latency_tolerant_lines;
	u32 latency_hiding;
	fixed20_12 a;

	a.full = dfixed_const(1);
	if (wm->vsc.full > a.full)
		latency_tolerant_lines = 1;
	else {
		if (lb_partitions <= (wm->vtaps + 1))
			latency_tolerant_lines = 1;
		else
			latency_tolerant_lines = 2;
	}

	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

	if (evergreen_latency_watermark(wm) <= latency_hiding)
		return true;
	else
		return false;
}
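/*
 * Program per-crtc latency watermarks and arbitration priority marks.
 * Reading the fixed-point math below, a priority mark works out to roughly
 *   latency(ns) * pixel_clock(MHz) * hsc / 1000 / 16,
 * i.e. the number of 16-pixel chunks consumed while a worst-case-latency
 * request is outstanding.
 */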
static void evergreen_program_watermarks(struct radeon_device *rdev,
					 struct radeon_crtc *radeon_crtc,
					 u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &radeon_crtc->base.mode;
	struct evergreen_wm_params wm;
	u32 pixel_period;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 priority_a_mark = 0, priority_b_mark = 0;
	u32 priority_a_cnt = PRIORITY_OFF;
	u32 priority_b_cnt = PRIORITY_OFF;
	u32 pipe_offset = radeon_crtc->crtc_id * 16;
	u32 tmp, arb_control3;
	fixed20_12 a, b, c;

	if (radeon_crtc->base.enabled && num_heads && mode) {
		pixel_period = 1000000 / (u32)mode->clock;
		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
		priority_a_cnt = 0;
		priority_b_cnt = 0;

		wm.yclk = rdev->pm.current_mclk * 10;
		wm.sclk = rdev->pm.current_sclk * 10;
		wm.disp_clk = mode->clock;
		wm.src_width = mode->crtc_hdisplay;
		wm.active_time = mode->crtc_hdisplay * pixel_period;
		wm.blank_time = line_time - wm.active_time;
		wm.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm.interlaced = true;
		wm.vsc = radeon_crtc->vsc;
		wm.vtaps = 1;
		if (radeon_crtc->rmx_type != RMX_OFF)
			wm.vtaps = 2;
		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm.lb_size = lb_size;
		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
		wm.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
		/* set for low clocks */
		/* wm.yclk = low clk; wm.sclk = low clk */
		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
		    !evergreen_check_latency_hiding(&wm) ||
		    (rdev->disp_priority == 2)) {
			DRM_INFO("force priority to high\n");
			priority_a_cnt |= PRIORITY_ALWAYS_ON;
			priority_b_cnt |= PRIORITY_ALWAYS_ON;
		}

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_a);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_a_mark = dfixed_trunc(c);
		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

		a.full = dfixed_const(1000);
		b.full = dfixed_const(mode->clock);
		b.full = dfixed_div(b, a);
		c.full = dfixed_const(latency_watermark_b);
		c.full = dfixed_mul(c, b);
		c.full = dfixed_mul(c, radeon_crtc->hsc);
		c.full = dfixed_div(c, a);
		a.full = dfixed_const(16);
		c.full = dfixed_div(c, a);
		priority_b_mark = dfixed_trunc(c);
		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
	}

	/* select wm A */
	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp = arb_control3;
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(1);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* select wm B */
	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
	tmp &= ~LATENCY_WATERMARK_MASK(3);
	tmp |= LATENCY_WATERMARK_MASK(2);
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
		LATENCY_HIGH_WATERMARK(line_time)));
	/* restore original selection */
	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);

	/* write the priority marks */
	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}
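/*
 * CRTCs are walked in pairs below because each pair shares one line buffer
 * (see evergreen_line_buffer_adjust() above), so both heads of a pair are
 * reprogrammed together.
 */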
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	u32 num_heads = 0, lb_size;
	int i;

	radeon_update_display_priority(rdev);

	for (i = 0; i < rdev->num_crtc; i++) {
		if (rdev->mode_info.crtcs[i]->base.enabled)
			num_heads++;
	}
	for (i = 0; i < rdev->num_crtc; i += 2) {
		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
	}
}
static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(SRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL, 0);

	evergreen_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int r;

	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
	evergreen_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
void evergreen_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
}
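/*
 * evergreen_mc_stop()/evergreen_mc_resume() bracket memory controller
 * reprogramming: the VGA renderer and all CRTCs are shut off (state saved
 * in struct evergreen_mc_save) so nothing scans out of VRAM while the
 * aperture registers change.  IGP parts only have two display controllers,
 * hence the RADEON_IS_IGP checks around CRTC 2-5.
 */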
static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	save->vga_control[0] = RREG32(D1VGA_CONTROL);
	save->vga_control[1] = RREG32(D2VGA_CONTROL);
	save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
	save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
	save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
	save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
	save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	/* Stop all video */
	WREG32(VGA_RENDER_CONTROL, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(D1VGA_CONTROL, 0);
	WREG32(D2VGA_CONTROL, 0);
	WREG32(EVERGREEN_D3VGA_CONTROL, 0);
	WREG32(EVERGREEN_D4VGA_CONTROL, 0);
	WREG32(EVERGREEN_D5VGA_CONTROL, 0);
	WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);
	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
	       (u32)rdev->mc.vram_start);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);

		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       upper_32_bits(rdev->mc.vram_start));
		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
		       (u32)rdev->mc.vram_start);
	}

	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
	/* Unlock host access */
	WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
	mdelay(1);
	/* Restore video state */
	WREG32(D1VGA_CONTROL, save->vga_control[0]);
	WREG32(D2VGA_CONTROL, save->vga_control[1]);
	WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
	WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
	WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
	WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
	}
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
	}
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (!(rdev->flags & RADEON_IS_IGP)) {
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}
	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
static void evergreen_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	if (rdev->flags & RADEON_IS_IGP) {
		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
	}
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
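/*
 * CP (command processor) setup: the PFP and ME microcode images are
 * streamed word-by-word into the CP's internal RAMs through the
 * UCODE_ADDR/UCODE_DATA and ME_RAM_WADDR/ME_RAM_DATA register pairs while
 * the ring is parked.
 */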
static int evergreen_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < EVERGREEN_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < EVERGREEN_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
static int evergreen_cp_start(struct radeon_device *rdev)
{
	int r, i;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	radeon_ring_write(rdev, 0x0);
	radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(CP_ME_CNTL, cp_me);

	r = radeon_ring_lock(rdev, evergreen_default_size + 15);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < evergreen_default_size; i++)
		radeon_ring_write(rdev, evergreen_default_state[i]);

	radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(rdev, 0);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(rdev, 0xc0026f00);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);
	radeon_ring_write(rdev, 0x00000000);

	/* Clear consts */
	radeon_ring_write(rdev, 0xc0036f00);
	radeon_ring_write(rdev, 0x00000bc4);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);
	radeon_ring_write(rdev, 0xffffffff);

	radeon_ring_unlock_commit(rdev);

	return 0;
}
int evergreen_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_SH |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	evergreen_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}
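/*
 * Build the pipe-to-backend map written to GB_BACKEND_MAP: each tile pipe
 * gets a 4-bit field selecting the render backend that services it, with
 * the enabled backends assigned round-robin over the (optionally swizzled)
 * pipe order.
 */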
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
						  u32 num_tile_pipes,
						  u32 num_backends,
						  u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask = 0;
	u32 enabled_backends_count = 0;
	u32 cur_pipe;
	u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
	u32 cur_backend = 0;
	u32 i;
	bool force_no_swizzle;

	if (num_tile_pipes > EVERGREEN_MAX_PIPES)
		num_tile_pipes = EVERGREEN_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > EVERGREEN_MAX_BACKENDS)
		num_backends = EVERGREEN_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
	switch (rdev->family) {
	case CHIP_CEDAR:
	case CHIP_REDWOOD:
	case CHIP_PALM:
		force_no_swizzle = false;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	default:
		force_no_swizzle = true;
		break;
	}
	if (force_no_swizzle) {
		bool last_backend_enabled = false;

		force_no_swizzle = false;
		for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
			if (((enabled_backends_mask >> i) & 1) == 1) {
				if (last_backend_enabled)
					force_no_swizzle = true;
				last_backend_enabled = true;
			} else
				last_backend_enabled = false;
		}
	}

	switch (num_tile_pipes) {
	case 1:
	case 3:
	case 5:
	case 7:
		DRM_ERROR("odd number of pipes!\n");
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 4:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 1;
			swizzle_pipe[3] = 3;
		}
		break;
	case 6:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 1;
			swizzle_pipe[4] = 3;
			swizzle_pipe[5] = 5;
		}
		break;
	case 8:
		if (force_no_swizzle) {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 1;
			swizzle_pipe[2] = 2;
			swizzle_pipe[3] = 3;
			swizzle_pipe[4] = 4;
			swizzle_pipe[5] = 5;
			swizzle_pipe[6] = 6;
			swizzle_pipe[7] = 7;
		} else {
			swizzle_pipe[0] = 0;
			swizzle_pipe[1] = 2;
			swizzle_pipe[2] = 4;
			swizzle_pipe[3] = 6;
			swizzle_pipe[4] = 1;
			swizzle_pipe[5] = 3;
			swizzle_pipe[6] = 5;
			swizzle_pipe[7] = 7;
		}
		break;
	}

	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;

		backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));

		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
	}

	return backend_map;
}
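/*
 * Steer the TCP and MC shared channels with per-family remap constants,
 * presumably so that accesses spread evenly across the populated memory
 * channels.
 */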
static void evergreen_program_channel_remap(struct radeon_device *rdev)
{
	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;

	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	case 1:
	case 2:
	case 3:
	default:
		/* default mapping */
		mc_shared_chremap = 0x00fac688;
		break;
	}

	switch (rdev->family) {
	case CHIP_HEMLOCK:
	case CHIP_CYPRESS:
		tcp_chan_steer_lo = 0x54763210;
		tcp_chan_steer_hi = 0x0000ba98;
		break;
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
	case CHIP_PALM:
	default:
		tcp_chan_steer_lo = 0x76543210;
		tcp_chan_steer_hi = 0x0000ba98;
		break;
	}

	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
static void evergreen_gpu_init(struct radeon_device *rdev)
{
	u32 cc_rb_backend_disable = 0;
	u32 cc_gc_shader_pipe_config;
	u32 gb_addr_config = 0;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 gb_backend_map;
	u32 grbm_gfx_index;
	u32 sx_debug_1;
	u32 smx_dc_ctl0;
	u32 sq_config;
	u32 sq_lds_resource_mgmt;
	u32 sq_gpr_resource_mgmt_1;
	u32 sq_gpr_resource_mgmt_2;
	u32 sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt;
	u32 sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1;
	u32 sq_stack_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_3;
	u32 vgt_cache_invalidation;
	u32 hdp_host_path_cntl;
	int i, j, num_shader_engines, ps_thread_count;

	switch (rdev->family) {
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		rdev->config.evergreen.num_ses = 2;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 8;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_JUNIPER:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 10;
		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 512;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_REDWOOD:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 4;
		rdev->config.evergreen.max_tile_pipes = 4;
		rdev->config.evergreen.max_simds = 5;
		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 248;
		rdev->config.evergreen.max_gs_threads = 32;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 256;
		rdev->config.evergreen.sx_max_export_pos_size = 64;
		rdev->config.evergreen.sx_max_export_smx_size = 192;
		rdev->config.evergreen.max_hw_contexts = 8;
		rdev->config.evergreen.sq_num_cf_insts = 2;

		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_CEDAR:
	default:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	case CHIP_PALM:
		rdev->config.evergreen.num_ses = 1;
		rdev->config.evergreen.max_pipes = 2;
		rdev->config.evergreen.max_tile_pipes = 2;
		rdev->config.evergreen.max_simds = 2;
		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
		rdev->config.evergreen.max_gprs = 256;
		rdev->config.evergreen.max_threads = 192;
		rdev->config.evergreen.max_gs_threads = 16;
		rdev->config.evergreen.max_stack_entries = 256;
		rdev->config.evergreen.sx_num_of_sets = 4;
		rdev->config.evergreen.sx_max_export_size = 128;
		rdev->config.evergreen.sx_max_export_pos_size = 32;
		rdev->config.evergreen.sx_max_export_smx_size = 96;
		rdev->config.evergreen.max_hw_contexts = 4;
		rdev->config.evergreen.sq_num_cf_insts = 1;

		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;

	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
				  & EVERGREEN_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
			       & EVERGREEN_MAX_SIMDS_MASK);

	cc_rb_backend_disable =
		BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
				& EVERGREEN_MAX_BACKENDS_MASK);

	mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
	mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		gb_addr_config |= NUM_PIPES(0);
		break;
	case 2:
		gb_addr_config |= NUM_PIPES(1);
		break;
	case 4:
		gb_addr_config |= NUM_PIPES(2);
		break;
	case 8:
		gb_addr_config |= NUM_PIPES(3);
		break;
	}

	gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
	gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
	gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
	gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
	gb_addr_config |= MULTI_GPU_TILE_SIZE(2);

	if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
		gb_addr_config |= ROW_SIZE(2);
	else
		gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
	if (rdev->ddev->pdev->device == 0x689e) {
		u32 efuse_straps_4;
		u32 efuse_straps_3;
		u8 efuse_box_bit_131_124;

		WREG32(RCU_IND_INDEX, 0x204);
		efuse_straps_4 = RREG32(RCU_IND_DATA);
		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));

		switch(efuse_box_bit_131_124) {
		case 0x00:
			gb_backend_map = 0x76543210;
			break;
		case 0x55:
			gb_backend_map = 0x77553311;
			break;
		case 0x0f:
			gb_backend_map = 0x77553300;
			break;
		case 0xf0:
			gb_backend_map = 0x77552211;
			break;
		case 0xff:
			gb_backend_map = 0x77443300;
			break;
		case 0x5f:
			gb_backend_map = 0x66552211;
			break;
		case 0xf5:
			gb_backend_map = 0x77552200;
			break;
		case 0x50:
			gb_backend_map = 0x66442200;
			break;
		case 0x05:
			gb_backend_map = 0x66553311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else if (rdev->ddev->pdev->device == 0x68b9) {
		u32 efuse_straps_3;
		u8 efuse_box_bit_127_124;

		WREG32(RCU_IND_INDEX, 0x203);
		efuse_straps_3 = RREG32(RCU_IND_DATA);
		efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);

		switch(efuse_box_bit_127_124) {
		case 0x0:
			gb_backend_map = 0x00003210;
			break;
		case 0x5:
		case 0x6:
		case 0x9:
		case 0xa:
			gb_backend_map = 0x00003311;
			break;
		default:
			DRM_ERROR("bad backend map, using default\n");
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
			break;
		}
	} else {
		switch (rdev->family) {
		case CHIP_HEMLOCK:
		case CHIP_CYPRESS:
			gb_backend_map = 0x66442200;
			break;
		case CHIP_JUNIPER:
			gb_backend_map = 0x00006420;
			break;
		default:
			gb_backend_map =
				evergreen_get_tile_pipe_to_backend_map(rdev,
								       rdev->config.evergreen.max_tile_pipes,
								       rdev->config.evergreen.max_backends,
								       ((EVERGREEN_MAX_BACKENDS_MASK <<
									 rdev->config.evergreen.max_backends) &
									EVERGREEN_MAX_BACKENDS_MASK));
		}
	}
	/* setup tiling info dword.  gb_addr_config is not adequate since it does
	 * not have bank info, so create a custom tiling dword.
	 * bits 3:0   num_pipes
	 * bits 7:4   num_banks
	 * bits 11:8  group_size
	 * bits 15:12 row_size
	 */
	rdev->config.evergreen.tile_config = 0;
	switch (rdev->config.evergreen.max_tile_pipes) {
	case 1:
	default:
		rdev->config.evergreen.tile_config |= (0 << 0);
		break;
	case 2:
		rdev->config.evergreen.tile_config |= (1 << 0);
		break;
	case 4:
		rdev->config.evergreen.tile_config |= (2 << 0);
		break;
	case 8:
		rdev->config.evergreen.tile_config |= (3 << 0);
		break;
	}
	rdev->config.evergreen.tile_config |=
		((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
	rdev->config.evergreen.tile_config |=
		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
	rdev->config.evergreen.tile_config |=
		((gb_addr_config & 0x30000000) >> 28) << 12;
	WREG32(GB_BACKEND_MAP, gb_backend_map);
	WREG32(GB_ADDR_CONFIG, gb_addr_config);
	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(HDP_ADDR_CONFIG, gb_addr_config);

	evergreen_program_channel_remap(rdev);

	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;

	for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
		u32 rb = cc_rb_backend_disable | (0xf0 << 16);
		u32 sp = cc_gc_shader_pipe_config;
		u32 gfx = grbm_gfx_index | SE_INDEX(i);

		if (i == num_shader_engines) {
			rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
			sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
		}

		WREG32(GRBM_GFX_INDEX, gfx);
		WREG32(RLC_GFX_INDEX, gfx);

		WREG32(CC_RB_BACKEND_DISABLE, rb);
		WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
		WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
		WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
	}

	grbm_gfx_index |= SE_BROADCAST_WRITES;
	WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
	WREG32(RLC_GFX_INDEX, grbm_gfx_index);

	WREG32(CGTS_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
	WREG32(CGTS_USER_TCC_DISABLE, 0);
    /* set HW defaults for 3D engine */
    WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
                     ROQ_IB2_START(0x2b)));

    WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));

    WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
                 SYNC_GRADIENT |
                 SYNC_WALKER |
                 SYNC_ALIGNER));

    sx_debug_1 = RREG32(SX_DEBUG_1);
    sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
    WREG32(SX_DEBUG_1, sx_debug_1);

    smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
    smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
    smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
    WREG32(SMX_DC_CTL0, smx_dc_ctl0);

    WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
                    POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
                    SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));

    WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
                 SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
                 SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));

    WREG32(VGT_NUM_INSTANCES, 1);
    WREG32(SPI_CONFIG_CNTL, 0);
    WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
    WREG32(CP_PERFMON_CNTL, 0);

    WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
                  FETCH_FIFO_HIWATER(0x4) |
                  DONE_FIFO_HIWATER(0xe0) |
                  ALU_UPDATE_FIFO_HIWATER(0x8)));
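
    /* SQ setup: enable the vertex cache where present and split the shared
     * GPR, thread and stack pools across the PS/VS/GS/ES/HS/LS stages */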
    sq_config = RREG32(SQ_CONFIG);
    sq_config &= ~(PS_PRIO(3) |
               VS_PRIO(3) |
               GS_PRIO(3) |
               ES_PRIO(3));
    sq_config |= (VC_ENABLE |
              EXPORT_SRC_C |
              PS_PRIO(0) |
              VS_PRIO(1) |
              GS_PRIO(2) |
              ES_PRIO(3));

    switch (rdev->family) {
        /* no vertex cache */
        sq_config &= ~VC_ENABLE;
        break;
    default:
        break;
    }

    sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);

    sq_gpr_resource_mgmt_1 = NUM_PS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 12 / 32);
    sq_gpr_resource_mgmt_1 |= NUM_VS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 6 / 32);
    sq_gpr_resource_mgmt_1 |= NUM_CLAUSE_TEMP_GPRS(4);
    sq_gpr_resource_mgmt_2 = NUM_GS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
    sq_gpr_resource_mgmt_2 |= NUM_ES_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 4 / 32);
    sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
    sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
    switch (rdev->family) {
        ps_thread_count = 96;
        break;
    default:
        ps_thread_count = 128;
        break;
    }

    sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
    sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
    sq_thread_resource_mgmt |= NUM_GS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
    sq_thread_resource_mgmt |= NUM_ES_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
    sq_thread_resource_mgmt_2 = NUM_HS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
    sq_thread_resource_mgmt_2 |= NUM_LS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);

    sq_stack_resource_mgmt_1 = NUM_PS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
    sq_stack_resource_mgmt_1 |= NUM_VS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
    sq_stack_resource_mgmt_2 = NUM_GS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
    sq_stack_resource_mgmt_2 |= NUM_ES_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
    sq_stack_resource_mgmt_3 = NUM_HS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
    sq_stack_resource_mgmt_3 |= NUM_LS_STACK_ENTRIES((rdev->config.evergreen.max_stack_entries * 1) / 6);
    WREG32(SQ_CONFIG, sq_config);
    WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
    WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
    WREG32(SQ_GPR_RESOURCE_MGMT_3, sq_gpr_resource_mgmt_3);
    WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
    WREG32(SQ_THREAD_RESOURCE_MGMT_2, sq_thread_resource_mgmt_2);
    WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
    WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
    WREG32(SQ_STACK_RESOURCE_MGMT_3, sq_stack_resource_mgmt_3);
    WREG32(SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0);
    WREG32(SQ_LDS_RESOURCE_MGMT, sq_lds_resource_mgmt);

    WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
                      FORCE_EOV_MAX_REZ_CNT(255)));
    switch (rdev->family) {
        vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
        break;
    default:
        vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
        break;
    }
    vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
    WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);

    WREG32(VGT_GS_VERTEX_REUSE, 16);
    WREG32(PA_SC_LINE_STIPPLE_STATE, 0);

    WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
    WREG32(VGT_OUT_DEALLOC_CNTL, 16);

    WREG32(CB_PERF_CTR0_SEL_0, 0);
    WREG32(CB_PERF_CTR0_SEL_1, 0);
    WREG32(CB_PERF_CTR1_SEL_0, 0);
    WREG32(CB_PERF_CTR1_SEL_1, 0);
    WREG32(CB_PERF_CTR2_SEL_0, 0);
    WREG32(CB_PERF_CTR2_SEL_1, 0);
    WREG32(CB_PERF_CTR3_SEL_0, 0);
    WREG32(CB_PERF_CTR3_SEL_1, 0);
    /* clear render buffer base addresses */
    WREG32(CB_COLOR0_BASE, 0);
    WREG32(CB_COLOR1_BASE, 0);
    WREG32(CB_COLOR2_BASE, 0);
    WREG32(CB_COLOR3_BASE, 0);
    WREG32(CB_COLOR4_BASE, 0);
    WREG32(CB_COLOR5_BASE, 0);
    WREG32(CB_COLOR6_BASE, 0);
    WREG32(CB_COLOR7_BASE, 0);
    WREG32(CB_COLOR8_BASE, 0);
    WREG32(CB_COLOR9_BASE, 0);
    WREG32(CB_COLOR10_BASE, 0);
    WREG32(CB_COLOR11_BASE, 0);
    /* set the shader const cache sizes to 0 */
    for (i = SQ_ALU_CONST_BUFFER_SIZE_PS_0; i < 0x28200; i += 4)
        WREG32(i, 0);
    for (i = SQ_ALU_CONST_BUFFER_SIZE_HS_0; i < 0x29000; i += 4)
        WREG32(i, 0);

    hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
    WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);

    WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
}
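
/* Read back the memory controller configuration to work out the VRAM bus
 * width and size, then place VRAM and GTT in the GPU address space. */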
int evergreen_mc_init(struct radeon_device *rdev)
{
    u32 tmp;
    int chansize, numchan;

    /* Get VRAM information */
    rdev->mc.vram_is_ddr = true;
    tmp = RREG32(MC_ARB_RAMCFG);
    if (tmp & CHANSIZE_OVERRIDE) {
        chansize = 16;
    } else if (tmp & CHANSIZE_MASK) {
        chansize = 64;
    } else {
        chansize = 32;
    }
    tmp = RREG32(MC_SHARED_CHMAP);
    switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
    case 0:
    default:
        numchan = 1;
        break;
    case 1:
        numchan = 2;
        break;
    case 2:
        numchan = 4;
        break;
    case 3:
        numchan = 8;
        break;
    }
    rdev->mc.vram_width = numchan * chansize;
    /* Could aper size report 0 ? */
    rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
    rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
    /* Setup GPU memory space */
    if (rdev->flags & RADEON_IS_IGP) {
        /* size in bytes on fusion */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
    } else {
        /* size in MB on evergreen */
        rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
        rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
    }
    rdev->mc.visible_vram_size = rdev->mc.aper_size;
    rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
    r700_vram_gtt_location(rdev, &rdev->mc);
    radeon_update_bandwidth_info(rdev);

    return 0;
}
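
/* Lockup detection: if the graphics engine reports busy but the CP read
 * pointer has stopped advancing, the GPU is considered hung. */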
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
{
    u32 srbm_status;
    u32 grbm_status;
    u32 grbm_status_se0, grbm_status_se1;
    struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
    int r;

    srbm_status = RREG32(SRBM_STATUS);
    grbm_status = RREG32(GRBM_STATUS);
    grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
    grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
    if (!(grbm_status & GUI_ACTIVE)) {
        r100_gpu_lockup_update(lockup, &rdev->cp);
        return false;
    }
    /* force CP activities */
    r = radeon_ring_lock(rdev, 2);
    if (!r) {
        radeon_ring_write(rdev, 0x80000000);
        radeon_ring_write(rdev, 0x80000000);
        radeon_ring_unlock_commit(rdev);
    }
    rdev->cp.rptr = RREG32(CP_RB_RPTR);
    return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
    struct evergreen_mc_save save;
    u32 grbm_reset = 0;

    dev_info(rdev->dev, "GPU softreset \n");
    dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
        RREG32(GRBM_STATUS));
    dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
        RREG32(GRBM_STATUS_SE0));
    dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
        RREG32(GRBM_STATUS_SE1));
    dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
        RREG32(SRBM_STATUS));
    evergreen_mc_stop(rdev, &save);
    if (evergreen_mc_wait_for_idle(rdev)) {
        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
    }
    /* Disable CP parsing/prefetching */
    WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

    /* reset all the gfx blocks */
    grbm_reset = (SOFT_RESET_CP |
              SOFT_RESET_CB |
              SOFT_RESET_DB |
              SOFT_RESET_PA |
              SOFT_RESET_SC |
              SOFT_RESET_SPI |
              SOFT_RESET_SH |
              SOFT_RESET_SX |
              SOFT_RESET_TC |
              SOFT_RESET_TA |
              SOFT_RESET_VC |
              SOFT_RESET_VGT);

    dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
    WREG32(GRBM_SOFT_RESET, grbm_reset);
    (void)RREG32(GRBM_SOFT_RESET);
    udelay(50);
    WREG32(GRBM_SOFT_RESET, 0);
    (void)RREG32(GRBM_SOFT_RESET);
    /* Wait a little for things to settle down */
    udelay(50);
    dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
        RREG32(GRBM_STATUS));
    dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
        RREG32(GRBM_STATUS_SE0));
    dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
        RREG32(GRBM_STATUS_SE1));
    dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
        RREG32(SRBM_STATUS));
    evergreen_mc_resume(rdev, &save);
    return 0;
}

int evergreen_asic_reset(struct radeon_device *rdev)
{
    return evergreen_gpu_soft_reset(rdev);
}
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
    switch (crtc) {
    case 0:
        return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
    case 1:
        return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
    case 2:
        return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
    case 3:
        return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
    case 4:
        return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
    case 5:
        return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
    default:
        return 0;
    }
}
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
    u32 tmp;

    WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
    WREG32(GRBM_INT_CNTL, 0);
    WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
    WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
    if (!(rdev->flags & RADEON_IS_IGP)) {
        WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
        WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
    }

    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
    if (!(rdev->flags & RADEON_IS_IGP)) {
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
        WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
    }

    WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
    WREG32(DACB_AUTODETECT_INT_CONTROL, 0);

    tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
    WREG32(DC_HPD1_INT_CONTROL, tmp);
    tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
    WREG32(DC_HPD2_INT_CONTROL, tmp);
    tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
    WREG32(DC_HPD3_INT_CONTROL, tmp);
    tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
    WREG32(DC_HPD4_INT_CONTROL, tmp);
    tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
    WREG32(DC_HPD5_INT_CONTROL, tmp);
    tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
    WREG32(DC_HPD6_INT_CONTROL, tmp);
}
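
/* Build each interrupt enable word locally from the requested rdev->irq
 * state, then commit them all to the hardware in one pass at the end. */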
int evergreen_irq_set(struct radeon_device *rdev)
{
    u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
    u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
    u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
    u32 grbm_int_cntl = 0;
    u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;

    if (!rdev->irq.installed) {
        WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
        return -EINVAL;
    }
    /* don't enable anything if the ih is disabled */
    if (!rdev->ih.enabled) {
        r600_disable_interrupts(rdev);
        /* force the active interrupt state to all disabled */
        evergreen_disable_interrupt_state(rdev);
        return 0;
    }

    hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
    hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

    if (rdev->irq.sw_int) {
        DRM_DEBUG("evergreen_irq_set: sw int\n");
        cp_int_cntl |= RB_INT_ENABLE;
        cp_int_cntl |= TIME_STAMP_INT_ENABLE;
    }
    if (rdev->irq.crtc_vblank_int[0] ||
        rdev->irq.pflip[0]) {
        DRM_DEBUG("evergreen_irq_set: vblank 0\n");
        crtc1 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[1] ||
        rdev->irq.pflip[1]) {
        DRM_DEBUG("evergreen_irq_set: vblank 1\n");
        crtc2 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[2] ||
        rdev->irq.pflip[2]) {
        DRM_DEBUG("evergreen_irq_set: vblank 2\n");
        crtc3 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[3] ||
        rdev->irq.pflip[3]) {
        DRM_DEBUG("evergreen_irq_set: vblank 3\n");
        crtc4 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[4] ||
        rdev->irq.pflip[4]) {
        DRM_DEBUG("evergreen_irq_set: vblank 4\n");
        crtc5 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.crtc_vblank_int[5] ||
        rdev->irq.pflip[5]) {
        DRM_DEBUG("evergreen_irq_set: vblank 5\n");
        crtc6 |= VBLANK_INT_MASK;
    }
    if (rdev->irq.hpd[0]) {
        DRM_DEBUG("evergreen_irq_set: hpd 1\n");
        hpd1 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[1]) {
        DRM_DEBUG("evergreen_irq_set: hpd 2\n");
        hpd2 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[2]) {
        DRM_DEBUG("evergreen_irq_set: hpd 3\n");
        hpd3 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[3]) {
        DRM_DEBUG("evergreen_irq_set: hpd 4\n");
        hpd4 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[4]) {
        DRM_DEBUG("evergreen_irq_set: hpd 5\n");
        hpd5 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.hpd[5]) {
        DRM_DEBUG("evergreen_irq_set: hpd 6\n");
        hpd6 |= DC_HPDx_INT_EN;
    }
    if (rdev->irq.gui_idle) {
        DRM_DEBUG("gui idle\n");
        grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
    }

    WREG32(CP_INT_CNTL, cp_int_cntl);
    WREG32(GRBM_INT_CNTL, grbm_int_cntl);

    WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
    WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
    if (!(rdev->flags & RADEON_IS_IGP)) {
        WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
        WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
        WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
        WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
    }

    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
    WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);

    WREG32(DC_HPD1_INT_CONTROL, hpd1);
    WREG32(DC_HPD2_INT_CONTROL, hpd2);
    WREG32(DC_HPD3_INT_CONTROL, hpd3);
    WREG32(DC_HPD4_INT_CONTROL, hpd4);
    WREG32(DC_HPD5_INT_CONTROL, hpd5);
    WREG32(DC_HPD6_INT_CONTROL, hpd6);

    return 0;
}
static inline void evergreen_irq_ack(struct radeon_device *rdev)
{
    u32 tmp;

    rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
    rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
    rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
    rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
    rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
    rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
    rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
    rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
    rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
    rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
    rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
    rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);

    if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
        WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
    if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
        WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
    if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
        WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
    if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
        WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
    if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
        WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
    if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
        WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);

    if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
        WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
    if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
        WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);

    if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
        WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
    if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
        WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

    if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
        WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
    if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
        WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);

    if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
        WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
    if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
        WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);

    if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
        WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
    if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
        WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);

    if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
        WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
    if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
        WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);

    if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
        tmp = RREG32(DC_HPD1_INT_CONTROL);
        tmp |= DC_HPDx_INT_ACK;
        WREG32(DC_HPD1_INT_CONTROL, tmp);
    }
    if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
        tmp = RREG32(DC_HPD2_INT_CONTROL);
        tmp |= DC_HPDx_INT_ACK;
        WREG32(DC_HPD2_INT_CONTROL, tmp);
    }
    if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
        tmp = RREG32(DC_HPD3_INT_CONTROL);
        tmp |= DC_HPDx_INT_ACK;
        WREG32(DC_HPD3_INT_CONTROL, tmp);
    }
    if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
        tmp = RREG32(DC_HPD4_INT_CONTROL);
        tmp |= DC_HPDx_INT_ACK;
        WREG32(DC_HPD4_INT_CONTROL, tmp);
    }
    if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
        tmp = RREG32(DC_HPD5_INT_CONTROL);
        tmp |= DC_HPDx_INT_ACK;
        WREG32(DC_HPD5_INT_CONTROL, tmp);
    }
    if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
        tmp = RREG32(DC_HPD6_INT_CONTROL);
        tmp |= DC_HPDx_INT_ACK;
        WREG32(DC_HPD6_INT_CONTROL, tmp);
    }
}
void evergreen_irq_disable(struct radeon_device *rdev)
{
    r600_disable_interrupts(rdev);
    /* Wait and acknowledge irq */
    mdelay(1);
    evergreen_irq_ack(rdev);
    evergreen_disable_interrupt_state(rdev);
}

static void evergreen_irq_suspend(struct radeon_device *rdev)
{
    evergreen_irq_disable(rdev);
    r600_rlc_stop(rdev);
}
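
/* The IH write pointer lives either in the write-back page (when enabled)
 * or in the IH_RB_WPTR register; on ring overflow skip ahead past the
 * overwritten vectors and clear the overflow flag. */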
static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
    u32 wptr, tmp;

    if (rdev->wb.enabled)
        wptr = rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4];
    else
        wptr = RREG32(IH_RB_WPTR);

    if (wptr & RB_OVERFLOW) {
        /* When a ring buffer overflow happen start parsing interrupt
         * from the last not overwritten vector (wptr + 16). Hopefully
         * this should allow us to catchup.
         */
        dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
            wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
        rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
        tmp = RREG32(IH_RB_CNTL);
        tmp |= IH_WPTR_OVERFLOW_CLEAR;
        WREG32(IH_RB_CNTL, tmp);
    }
    return (wptr & rdev->ih.ptr_mask);
}
int evergreen_irq_process(struct radeon_device *rdev)
{
    u32 wptr = evergreen_get_ih_wptr(rdev);
    u32 rptr = rdev->ih.rptr;
    u32 src_id, src_data;
    u32 ring_index;
    unsigned long flags;
    bool queue_hotplug = false;

    DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
    if (!rdev->ih.enabled)
        return IRQ_NONE;

    spin_lock_irqsave(&rdev->ih.lock, flags);

    if (rptr == wptr) {
        spin_unlock_irqrestore(&rdev->ih.lock, flags);
        return IRQ_NONE;
    }
    if (rdev->shutdown) {
        spin_unlock_irqrestore(&rdev->ih.lock, flags);
        return IRQ_NONE;
    }

restart_ih:
    /* display interrupts */
    evergreen_irq_ack(rdev);

    rdev->ih.wptr = wptr;
    while (rptr != wptr) {
        /* wptr/rptr are in bytes! */
        ring_index = rptr / 4;
        src_id = rdev->ih.ring[ring_index] & 0xff;
        src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

        switch (src_id) {
        case 1: /* D1 vblank/vline */
            switch (src_data) {
            case 0: /* D1 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[0]) {
                        drm_handle_vblank(rdev->ddev, 0);
                        rdev->pm.vblank_sync = true;
                        wake_up(&rdev->irq.vblank_queue);
                    }
                    if (rdev->irq.pflip[0])
                        radeon_crtc_handle_flip(rdev, 0);
                    rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D1 vblank\n");
                }
                break;
            case 1: /* D1 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D1 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 2: /* D2 vblank/vline */
            switch (src_data) {
            case 0: /* D2 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[1]) {
                        drm_handle_vblank(rdev->ddev, 1);
                        rdev->pm.vblank_sync = true;
                        wake_up(&rdev->irq.vblank_queue);
                    }
                    if (rdev->irq.pflip[1])
                        radeon_crtc_handle_flip(rdev, 1);
                    rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D2 vblank\n");
                }
                break;
            case 1: /* D2 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D2 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 3: /* D3 vblank/vline */
            switch (src_data) {
            case 0: /* D3 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[2]) {
                        drm_handle_vblank(rdev->ddev, 2);
                        rdev->pm.vblank_sync = true;
                        wake_up(&rdev->irq.vblank_queue);
                    }
                    if (rdev->irq.pflip[2])
                        radeon_crtc_handle_flip(rdev, 2);
                    rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D3 vblank\n");
                }
                break;
            case 1: /* D3 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D3 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 4: /* D4 vblank/vline */
            switch (src_data) {
            case 0: /* D4 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[3]) {
                        drm_handle_vblank(rdev->ddev, 3);
                        rdev->pm.vblank_sync = true;
                        wake_up(&rdev->irq.vblank_queue);
                    }
                    if (rdev->irq.pflip[3])
                        radeon_crtc_handle_flip(rdev, 3);
                    rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D4 vblank\n");
                }
                break;
            case 1: /* D4 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D4 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 5: /* D5 vblank/vline */
            switch (src_data) {
            case 0: /* D5 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[4]) {
                        drm_handle_vblank(rdev->ddev, 4);
                        rdev->pm.vblank_sync = true;
                        wake_up(&rdev->irq.vblank_queue);
                    }
                    if (rdev->irq.pflip[4])
                        radeon_crtc_handle_flip(rdev, 4);
                    rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D5 vblank\n");
                }
                break;
            case 1: /* D5 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D5 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 6: /* D6 vblank/vline */
            switch (src_data) {
            case 0: /* D6 vblank */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
                    if (rdev->irq.crtc_vblank_int[5]) {
                        drm_handle_vblank(rdev->ddev, 5);
                        rdev->pm.vblank_sync = true;
                        wake_up(&rdev->irq.vblank_queue);
                    }
                    if (rdev->irq.pflip[5])
                        radeon_crtc_handle_flip(rdev, 5);
                    rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
                    DRM_DEBUG("IH: D6 vblank\n");
                }
                break;
            case 1: /* D6 vline */
                if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
                    DRM_DEBUG("IH: D6 vline\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 42: /* HPD hotplug */
            switch (src_data) {
            case 0:
                if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD1\n");
                }
                break;
            case 1:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD2\n");
                }
                break;
            case 2:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD3\n");
                }
                break;
            case 3:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD4\n");
                }
                break;
            case 4:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD5\n");
                }
                break;
            case 5:
                if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
                    rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
                    queue_hotplug = true;
                    DRM_DEBUG("IH: HPD6\n");
                }
                break;
            default:
                DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
                break;
            }
            break;
        case 176: /* CP_INT in ring buffer */
        case 177: /* CP_INT in IB1 */
        case 178: /* CP_INT in IB2 */
            DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
            radeon_fence_process(rdev);
            break;
        case 181: /* CP EOP event */
            DRM_DEBUG("IH: CP EOP\n");
            radeon_fence_process(rdev);
            break;
        case 233: /* GUI IDLE */
            DRM_DEBUG("IH: GUI idle\n");
            rdev->pm.gui_idle = true;
            wake_up(&rdev->irq.idle_queue);
            break;
        default:
            DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
            break;
        }
        /* wptr/rptr are in bytes! */
        rptr += 16;
        rptr &= rdev->ih.ptr_mask;
    }
    /* make sure wptr hasn't changed while processing */
    wptr = evergreen_get_ih_wptr(rdev);
    if (wptr != rdev->ih.wptr)
        goto restart_ih;
    if (queue_hotplug)
        schedule_work(&rdev->hotplug_work);
    rdev->ih.rptr = rptr;
    WREG32(IH_RB_RPTR, rdev->ih.rptr);
    spin_unlock_irqrestore(&rdev->ih.lock, flags);
    return IRQ_HANDLED;
}
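
/* One-time engine bring-up shared by init and resume: load microcode,
 * program the MC and GART, init the blitter, write-back and IH, then
 * start the CP ring. */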
static int evergreen_startup(struct radeon_device *rdev)
{
    int r;

    /* enable pcie gen2 link */
    evergreen_pcie_gen2_enable(rdev);

    if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
        r = r600_init_microcode(rdev);
        if (r) {
            DRM_ERROR("Failed to load firmware!\n");
            return r;
        }
    }

    evergreen_mc_program(rdev);
    if (rdev->flags & RADEON_IS_AGP) {
        evergreen_agp_enable(rdev);
    } else {
        r = evergreen_pcie_gart_enable(rdev);
        if (r)
            return r;
    }
    evergreen_gpu_init(rdev);

    r = evergreen_blit_init(rdev);
    if (r) {
        evergreen_blit_fini(rdev);
        rdev->asic->copy = NULL;
        dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
    }
    /* XXX: ontario has problems blitting to gart at the moment */
    if (rdev->family == CHIP_PALM) {
        rdev->asic->copy = NULL;
        rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
    }

    /* allocate wb buffer */
    r = radeon_wb_init(rdev);
    if (r)
        return r;

    r = r600_irq_init(rdev);
    if (r) {
        DRM_ERROR("radeon: IH init failed (%d).\n", r);
        radeon_irq_kms_fini(rdev);
        return r;
    }
    evergreen_irq_set(rdev);

    r = radeon_ring_init(rdev, rdev->cp.ring_size);
    if (r)
        return r;
    r = evergreen_cp_load_microcode(rdev);
    if (r)
        return r;
    r = evergreen_cp_resume(rdev);
    if (r)
        return r;

    return 0;
}
int evergreen_resume(struct radeon_device *rdev)
{
    int r;

    /* reset the asic, the gfx blocks are often in a bad state
     * after the driver is unloaded or after a resume
     */
    if (radeon_asic_reset(rdev))
        dev_warn(rdev->dev, "GPU reset failed !\n");
    /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
     * posting will perform necessary task to bring back GPU into good
     * shape.
     */
    atom_asic_init(rdev->mode_info.atom_context);

    r = evergreen_startup(rdev);
    if (r) {
        DRM_ERROR("r600 startup failed on resume\n");
        return r;
    }

    r = r600_ib_test(rdev);
    if (r) {
        DRM_ERROR("radeon: failed testing IB (%d).\n", r);
        return r;
    }

    return r;
}
int evergreen_suspend(struct radeon_device *rdev)
{
    int r;

    /* FIXME: we should wait for ring to be empty */
    r700_cp_stop(rdev);
    rdev->cp.ready = false;
    evergreen_irq_suspend(rdev);
    radeon_wb_disable(rdev);
    evergreen_pcie_gart_disable(rdev);

    /* unpin shaders bo */
    r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
    if (likely(r == 0)) {
        radeon_bo_unpin(rdev->r600_blit.shader_obj);
        radeon_bo_unreserve(rdev->r600_blit.shader_obj);
    }

    return 0;
}
int evergreen_copy_blit(struct radeon_device *rdev,
            uint64_t src_offset, uint64_t dst_offset,
            unsigned num_pages, struct radeon_fence *fence)
{
    int r;

    mutex_lock(&rdev->r600_blit.mutex);
    rdev->r600_blit.vb_ib = NULL;
    r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
    if (r) {
        if (rdev->r600_blit.vb_ib)
            radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
        mutex_unlock(&rdev->r600_blit.mutex);
        return r;
    }
    evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
    evergreen_blit_done_copy(rdev, fence);
    mutex_unlock(&rdev->r600_blit.mutex);
    return 0;
}
static bool evergreen_card_posted(struct radeon_device *rdev)
{
    u32 reg;

    /* first check CRTCs */
    if (rdev->flags & RADEON_IS_IGP)
        reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
    else
        reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
    if (reg & EVERGREEN_CRTC_MASTER_EN)
        return true;

    /* then check MEM_SIZE, in case the crtcs are off */
    if (RREG32(CONFIG_MEMSIZE))
        return true;

    return false;
}
/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 * from radeon_device.
 */
int evergreen_init(struct radeon_device *rdev)
{
    int r;

    r = radeon_dummy_page_init(rdev);
    if (r)
        return r;
    /* This doesn't do much */
    r = radeon_gem_init(rdev);
    if (r)
        return r;
    /* Read BIOS */
    if (!radeon_get_bios(rdev)) {
        if (ASIC_IS_AVIVO(rdev))
            return -EINVAL;
    }
    /* Must be an ATOMBIOS */
    if (!rdev->is_atom_bios) {
        dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
        return -EINVAL;
    }
    r = radeon_atombios_init(rdev);
    if (r)
        return r;
    /* reset the asic, the gfx blocks are often in a bad state
     * after the driver is unloaded or after a resume
     */
    if (radeon_asic_reset(rdev))
        dev_warn(rdev->dev, "GPU reset failed !\n");
    /* Post card if necessary */
    if (!evergreen_card_posted(rdev)) {
        if (!rdev->bios) {
            dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
            return -EINVAL;
        }
        DRM_INFO("GPU not posted. posting now...\n");
        atom_asic_init(rdev->mode_info.atom_context);
    }
    /* Initialize scratch registers */
    r600_scratch_init(rdev);
    /* Initialize surface registers */
    radeon_surface_init(rdev);
    /* Initialize clocks */
    radeon_get_clock_info(rdev->ddev);
    /* Fence driver */
    r = radeon_fence_driver_init(rdev);
    if (r)
        return r;
    /* initialize AGP */
    if (rdev->flags & RADEON_IS_AGP) {
        r = radeon_agp_init(rdev);
        if (r)
            radeon_agp_disable(rdev);
    }
    /* initialize memory controller */
    r = evergreen_mc_init(rdev);
    if (r)
        return r;
    /* Memory manager */
    r = radeon_bo_init(rdev);
    if (r)
        return r;

    r = radeon_irq_kms_init(rdev);
    if (r)
        return r;

    rdev->cp.ring_obj = NULL;
    r600_ring_init(rdev, 1024 * 1024);

    rdev->ih.ring_obj = NULL;
    r600_ih_ring_init(rdev, 64 * 1024);

    r = r600_pcie_gart_init(rdev);
    if (r)
        return r;

    rdev->accel_working = true;
    r = evergreen_startup(rdev);
    if (r) {
        dev_err(rdev->dev, "disabling GPU acceleration\n");
        r600_irq_fini(rdev);
        radeon_wb_fini(rdev);
        radeon_irq_kms_fini(rdev);
        evergreen_pcie_gart_fini(rdev);
        rdev->accel_working = false;
    }
    if (rdev->accel_working) {
        r = radeon_ib_pool_init(rdev);
        if (r) {
            DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
            rdev->accel_working = false;
        }
        r = r600_ib_test(rdev);
        if (r) {
            DRM_ERROR("radeon: failed testing IB (%d).\n", r);
            rdev->accel_working = false;
        }
    }
    return 0;
}
void evergreen_fini(struct radeon_device *rdev)
{
    evergreen_blit_fini(rdev);
    r600_irq_fini(rdev);
    radeon_wb_fini(rdev);
    radeon_irq_kms_fini(rdev);
    evergreen_pcie_gart_fini(rdev);
    radeon_gem_fini(rdev);
    radeon_fence_driver_fini(rdev);
    radeon_agp_fini(rdev);
    radeon_bo_fini(rdev);
    radeon_atombios_fini(rdev);
    kfree(rdev->bios);
    rdev->bios = NULL;
    radeon_dummy_page_fini(rdev);
}
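
/* Advertise gen2 speeds on the PCIE link when the other side supports it;
 * skipped on IGP, non-PCIE and X2 boards. */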
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
    u32 link_width_cntl, speed_cntl;

    if (rdev->flags & RADEON_IS_IGP)
        return;

    if (!(rdev->flags & RADEON_IS_PCIE))
        return;

    /* x2 cards have a special sequence */
    if (ASIC_IS_X2(rdev))
        return;

    speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
    if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
        (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

        link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
        link_width_cntl &= ~LC_UPCONFIGURE_DIS;
        WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

        speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
        speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
        WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

        speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
        speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
        WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

        speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
        speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
        WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

        speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
        speed_cntl |= LC_GEN2_EN_STRAP;
        WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
    } else {
        link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
        /* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
        if (1)
            link_width_cntl |= LC_UPCONFIGURE_DIS;
        else
            link_width_cntl &= ~LC_UPCONFIGURE_DIS;
        WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
    }
}