/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include <uapi_drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "radeon_ucode.h"
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV770_smc.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV730_smc.bin");
MODULE_FIRMWARE("radeon/RV740_smc.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/RV710_smc.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_smc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_smc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_smc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_smc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
static const u32 crtc_offsets[2] =
{
	0,
	AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
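/*
 * crtc_offsets[] gives the per-CRTC register block offset relative to CRTC0,
 * so e.g. AVIVO_D1CRTC_CONTROL + crtc_offsets[1] addresses the second CRTC's
 * control register (see r600_is_display_hung() below).
 */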
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
static void r600_gpu_init(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
/**
 * r600_get_xclk - get the xclk
 *
 * @rdev: radeon_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (r6xx, IGPs, APUs).
 */
u32 r600_get_xclk(struct radeon_device *rdev)
{
	return rdev->clock.spll.reference_freq;
}
int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
{
	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
	int r;

	/* bypass vclk and dclk with bclk */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
			 ~UPLL_BYPASS_CNTL);

	if (!vclk || !dclk) {
		/* keep the Bypass mode, put PLL to sleep */
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
		return 0;
	}

	if (rdev->clock.spll.reference_freq == 10000)
		ref_div = 34;
	else
		ref_div = 4;

	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
					  ref_div + 1, 0xFFF, 2, 30, ~0,
					  &fb_div, &vclk_div, &dclk_div);
	if (r)
		return r;

	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
		fb_div >>= 1;
	else
		fb_div |= 1;

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* assert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);

	/* For RS780 we have to choose ref clk */
	if (rdev->family >= CHIP_RS780)
		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
			 ~UPLL_REFCLK_SRC_SEL_MASK);

	/* set the required fb, ref and post divider values */
	WREG32_P(CG_UPLL_FUNC_CNTL,
		 UPLL_FB_DIV(fb_div) |
		 UPLL_REF_DIV(ref_div),
		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 UPLL_SW_HILEN(vclk_div >> 1) |
		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
		 UPLL_SW_HILEN2(dclk_div >> 1) |
		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
		 ~UPLL_SW_MASK);

	/* give the PLL some time to settle */
	mdelay(15);

	/* deassert PLL_RESET */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);

	mdelay(15);

	/* deassert BYPASS EN */
	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);

	if (rdev->family >= CHIP_RS780)
		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);

	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
	if (r)
		return r;

	/* switch VCLK and DCLK selection */
	WREG32_P(CG_UPLL_FUNC_CNTL_2,
		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));

	mdelay(100);

	return 0;
}
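/*
 * Illustrative note on the UPLL programming above (a rough sketch, not a
 * statement of the exact hardware math): the output clocks follow the usual
 * PLL relationship of roughly
 *
 *	vclk ~= reference_freq * fb_div / (ref_div * vclk_div)
 *	dclk ~= reference_freq * fb_div / (ref_div * dclk_div)
 *
 * radeon_uvd_calc_upll_dividers() is assumed to pick fb_div/vclk_div/dclk_div
 * within the VCO limits passed in (50000/160000, in the driver's clock
 * units); the exact scaling of fb_div (the >>= 1 / |= 1 adjustment) is
 * family dependent.
 */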
void dce3_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum radeon_connector_dither dither = RADEON_FMT_DITHER_DISABLE;

	if (connector) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		bpc = radeon_get_monitor_bpc(connector);
		dither = radeon_connector->dither;
	}

	/* LVDS FMT is set up by atom */
	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= FMT_SPATIAL_DITHER_EN;
		else
			tmp |= FMT_TRUNCATE_EN;
		break;
	case 8:
		if (dither == RADEON_FMT_DITHER_ENABLE)
			/* XXX sort out optimal dither settings */
			tmp |= (FMT_SPATIAL_DITHER_EN | FMT_SPATIAL_DITHER_DEPTH);
		else
			tmp |= (FMT_TRUNCATE_EN | FMT_TRUNCATE_DEPTH);
		break;
	default:
		break;
	}

	WREG32(FMT_BIT_DEPTH_CONTROL + radeon_crtc->crtc_offset, tmp);
}
/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	int actual_temp = temp & 0xff;

	if (temp & 0x100)
		actual_temp -= 256;

	return actual_temp * 1000;
}
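/*
 * Example of the conversion above: a raw ASIC_T field of 0x2e (46) with the
 * sign bit clear reads back as 46 * 1000 = 46000 millidegrees, while 0x1ff
 * (sign bit set, low byte 0xff) becomes 255 - 256 = -1, i.e. -1000
 * millidegrees.
 */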
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else {
			rdev->pm.requested_power_state_index = 1;
		}

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else {
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
				}
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else {
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
				}
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for not defined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
void r600_pm_init_profile(struct radeon_device *rdev)
{
	int idx;

	if (rdev->family == CHIP_R600) {
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			if (rdev->flags & RADEON_IS_MOBILITY)
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
			else
				idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		/* 0xff01 is a flag rather than an actual voltage */
		if (voltage->voltage == 0xff01)
			return;
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned enable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);

		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
		    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
			/* don't try to enable hpd on eDP or LVDS to avoid breaking the
			 * aux dp channel on imac and help (but not completely fix)
			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
			 */
			continue;
		}
		if (ASIC_IS_DCE3(rdev)) {
			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
			if (ASIC_IS_DCE32(rdev))
				tmp |= DC_HPDx_EN;

			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				break;
			default:
				break;
			}
		}
		enable |= 1 << radeon_connector->hpd.hpd;
		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
	}
	radeon_irq_kms_enable_hpd(rdev, enable);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;
	unsigned disable = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
		if (ASIC_IS_DCE3(rdev)) {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				break;
			default:
				break;
			}
		} else {
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				break;
			default:
				break;
			}
		}
		disable |= 1 << radeon_connector->hpd.hpd;
	}
	radeon_irq_kms_disable_hpd(rdev, disable);
}
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		volatile uint32_t *ptr = rdev->gart.ptr;

		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
		 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * This seems to cause problems on some AGP cards. Just use the old
		 * method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = *ptr;
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
	radeon_gart_table_vram_unpin(rdev);
}
static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
static void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
uint32_t rs780_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	spin_lock(&rdev->mc_idx_lock);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg));
	r = RREG32(R_0028FC_MC_DATA);
	WREG32(R_0028F8_MC_INDEX, ~C_0028F8_MC_IND_ADDR);
	spin_unlock(&rdev->mc_idx_lock);
	return r;
}

void rs780_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	spin_lock(&rdev->mc_idx_lock);
	WREG32(R_0028F8_MC_INDEX, S_0028F8_MC_IND_ADDR(reg) |
		S_0028F8_MC_IND_WR_EN(1));
	WREG32(R_0028FC_MC_DATA, v);
	WREG32(R_0028F8_MC_INDEX, 0x7F);
	spin_unlock(&rdev->mc_idx_lock);
}
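/*
 * The two helpers above use the usual index/data register pair: the target
 * offset is latched into MC_INDEX (with the write-enable bit set for stores)
 * and the payload moves through MC_DATA, all under mc_idx_lock so concurrent
 * accessors cannot interleave.  A read of the UMA framebuffer location, for
 * example, goes through this path from r600_mc_init() below via the
 * RREG32_MC(R_000011_K8_FB_LOCATION) wrapper.
 */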
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
}
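/*
 * MC_VM_FB_LOCATION above packs the framebuffer window in 16 MB units:
 * bits 31:16 hold vram_end >> 24 and bits 15:0 hold vram_start >> 24.
 * For example, 512 MB of VRAM mapped at GPU address 0 (vram_end =
 * 0x1FFFFFFF) is programmed as (0x1f << 16) | 0x0000.
 */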
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same place as in CPU (PCI)
 * address space, as some GPUs seem to have issues when we reprogram it at
 * a different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture then we limit the VRAM size to the aperture.
 *
 * If we are using AGP then place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU point of view so that we can
 * program the GPU to catch accesses outside them (weird GPU policy see ??).
 *
 * This function will never fail; the worst case is limiting VRAM or GTT.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = mc->mc_mask - mc->gtt_end;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end + 1;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %juM 0x%08jX - 0x%08jX (%juM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;

		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
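/*
 * Worked example of the AGP branch above (illustrative numbers only): with a
 * 256 MB AGP aperture at gtt_start = 0xD0000000 and mc_mask = 0xFFFFFFFF,
 * size_bf = 0xD0000000 (about 3.25 GB below the aperture) while size_af =
 * 0x20000000 (512 MB above it), so VRAM is placed directly below the
 * aperture and vram_start becomes gtt_start - mc_vram_size.
 */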
static int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	/* Get VRAM informations */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);

		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			/* Use K8 direct mapping for fast fb access. */
			rdev->fastfb_working = false;
			h_addr = G_000012_K8_ADDR_EXT(RREG32_MC(R_000012_MC_MISC_UMA_CNTL));
			l_addr = RREG32_MC(R_000011_K8_FB_LOCATION);
			k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
			if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)
#endif
			{
				/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport
				 * memory is present.
				 */
				if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
					DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n",
						 (unsigned long long)rdev->mc.aper_base, k8_addr);
					rdev->mc.aper_base = (resource_size_t)k8_addr;
					rdev->fastfb_working = true;
				}
			}
		}
	}

	radeon_update_bandwidth_info(rdev);
	return 0;
}
int r600_vram_scratch_init(struct radeon_device *rdev)
{
	int r;
	void *vram_scratch_ptr_ptr;

	if (rdev->vram_scratch.robj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     0, NULL, &rdev->vram_scratch.robj);
		if (r)
			return r;
	}

	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (unlikely(r != 0)) {
		radeon_bo_unref(&rdev->vram_scratch.robj);
		return r;
	}
	r = radeon_bo_pin(rdev->vram_scratch.robj,
			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->vram_scratch.robj);
		radeon_bo_unref(&rdev->vram_scratch.robj);
		return r;
	}
	vram_scratch_ptr_ptr = &rdev->vram_scratch.ptr;
	r = radeon_bo_kmap(rdev->vram_scratch.robj,
			   vram_scratch_ptr_ptr);
	if (r)
		radeon_bo_unpin(rdev->vram_scratch.robj);
	radeon_bo_unreserve(rdev->vram_scratch.robj);
	if (r)
		radeon_bo_unref(&rdev->vram_scratch.robj);

	return r;
}

void r600_vram_scratch_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->vram_scratch.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->vram_scratch.robj);
		radeon_bo_unpin(rdev->vram_scratch.robj);
		radeon_bo_unreserve(rdev->vram_scratch.robj);
	}
	radeon_bo_unref(&rdev->vram_scratch.robj);
}
void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
{
	u32 tmp = RREG32(R600_BIOS_3_SCRATCH);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(R600_BIOS_3_SCRATCH, tmp);
}
static void r600_print_gpu_status_regs(struct radeon_device *rdev)
{
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS      = 0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2     = 0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS      = 0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
		RREG32(CP_STALLED_STAT1));
	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
		RREG32(CP_STALLED_STAT2));
	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
		RREG32(CP_BUSY_STAT));
	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
		RREG32(CP_STAT));
	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
}
static bool r600_is_display_hung(struct radeon_device *rdev)
{
	u32 crtc_hung = 0;
	u32 crtc_status[2];
	u32 i, j, tmp;

	for (i = 0; i < rdev->num_crtc; i++) {
		if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
			crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
			crtc_hung |= (1 << i);
		}
	}

	for (j = 0; j < 10; j++) {
		for (i = 0; i < rdev->num_crtc; i++) {
			if (crtc_hung & (1 << i)) {
				tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
				if (tmp != crtc_status[i])
					crtc_hung &= ~(1 << i);
			}
		}
		if (crtc_hung == 0)
			return false;
		udelay(100);
	}

	return true;
}
u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
	u32 reset_mask = 0;
	u32 tmp;

	/* GRBM_STATUS */
	tmp = RREG32(R_008010_GRBM_STATUS);
	if (rdev->family >= CHIP_RV770) {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	} else {
		if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
		    G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
		    G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
		    G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
		    G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
			reset_mask |= RADEON_RESET_GFX;
	}

	if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
	    G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
		reset_mask |= RADEON_RESET_CP;

	if (G_008010_GRBM_EE_BUSY(tmp))
		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

	/* DMA_STATUS_REG */
	tmp = RREG32(DMA_STATUS_REG);
	if (!(tmp & DMA_IDLE))
		reset_mask |= RADEON_RESET_DMA;

	/* SRBM_STATUS */
	tmp = RREG32(R_000E50_SRBM_STATUS);
	if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
		reset_mask |= RADEON_RESET_RLC;

	if (G_000E50_IH_BUSY(tmp))
		reset_mask |= RADEON_RESET_IH;

	if (G_000E50_SEM_BUSY(tmp))
		reset_mask |= RADEON_RESET_SEM;

	if (G_000E50_GRBM_RQ_PENDING(tmp))
		reset_mask |= RADEON_RESET_GRBM;

	if (G_000E50_VMC_BUSY(tmp))
		reset_mask |= RADEON_RESET_VMC;

	if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
	    G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
	    G_000E50_MCDW_BUSY(tmp))
		reset_mask |= RADEON_RESET_MC;

	if (r600_is_display_hung(rdev))
		reset_mask |= RADEON_RESET_DISPLAY;

	/* Skip MC reset as it's most likely not hung, just busy */
	if (reset_mask & RADEON_RESET_MC) {
		DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
		reset_mask &= ~RADEON_RESET_MC;
	}

	return reset_mask;
}
static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct rv515_mc_save save;
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	r600_print_gpu_status_regs(rdev);

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	if (reset_mask & RADEON_RESET_DMA) {
		/* Disable DMA */
		tmp = RREG32(DMA_RB_CNTL);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL, tmp);
	}

	mdelay(50);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
		if (rdev->family >= CHIP_RV770)
			grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
		else
			grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
				S_008020_SOFT_RESET_DB(1) |
				S_008020_SOFT_RESET_CB(1) |
				S_008020_SOFT_RESET_PA(1) |
				S_008020_SOFT_RESET_SC(1) |
				S_008020_SOFT_RESET_SMX(1) |
				S_008020_SOFT_RESET_SPI(1) |
				S_008020_SOFT_RESET_SX(1) |
				S_008020_SOFT_RESET_SH(1) |
				S_008020_SOFT_RESET_TC(1) |
				S_008020_SOFT_RESET_TA(1) |
				S_008020_SOFT_RESET_VC(1) |
				S_008020_SOFT_RESET_VGT(1);
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
			S_008020_SOFT_RESET_VGT(1);

		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
	}

	if (reset_mask & RADEON_RESET_DMA) {
		if (rdev->family >= CHIP_RV770)
			srbm_soft_reset |= RV770_SOFT_RESET_DMA;
		else
			srbm_soft_reset |= SOFT_RESET_DMA;
	}

	if (reset_mask & RADEON_RESET_RLC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);

	if (!(rdev->flags & RADEON_IS_IGP)) {
		if (reset_mask & RADEON_RESET_MC)
			srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
	}

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);

	if (grbm_soft_reset) {
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		tmp = RREG32(R_008020_GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	mdelay(1);

	rv515_mc_resume(rdev, &save);
	udelay(50);

	r600_print_gpu_status_regs(rdev);
}

static void r600_gpu_pci_config_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	dev_info(rdev->dev, "GPU pci config reset\n");

	/* Disable CP parsing/prefetching */
	if (rdev->family >= CHIP_RV770)
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
	else
		WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

	/* disable the RLC */
	WREG32(RLC_CNTL, 0);

	tmp = RREG32(DMA_RB_CNTL);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL, tmp);

	/* set mclk/sclk to bypass */
	if (rdev->family >= CHIP_RV770)
		rv770_set_clk_bypass_mode(rdev);

	pci_disable_busmaster(rdev->pdev->dev.bsddev);
	/* disable mem access */
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}

	/* BIF reset workaround.  Not sure if this is needed on 6xx */
	tmp = RREG32(BUS_CNTL);
	tmp |= VGA_COHE_SPEC_TIMER_DIS;
	WREG32(BUS_CNTL, tmp);

	tmp = RREG32(BIF_SCRATCH0);

	radeon_pci_config_reset(rdev);

	/* BIF reset workaround.  Not sure if this is needed on 6xx */
	tmp = SOFT_RESET_BIF;
	WREG32(SRBM_SOFT_RESET, tmp);
	WREG32(SRBM_SOFT_RESET, 0);

	/* wait for asic to come out of reset */
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CONFIG_MEMSIZE) != 0xffffffff)
			break;
	}
}
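
/*
 * Full ASIC reset path: check which blocks report a hang, try the targeted
 * soft reset first and, if blocks still look hung and the radeon_hard_reset
 * module parameter is set, fall back to a PCI config reset.
 */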
int r600_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	/* try soft reset */
	r600_gpu_soft_reset(rdev, reset_mask);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	/* try pci config reset */
	if (reset_mask && radeon_hard_reset)
		r600_gpu_pci_config_reset(rdev);

	reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}

/**
 * r600_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = r600_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
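
/*
 * r6xx_remap_render_backend builds the backend map that distributes the
 * enabled render backends across the rendering pipes, skipping backends
 * that are disabled (missing on the ASIC or fused off).
 */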
u32
r6xx_remap_render_backend(struct radeon_device *rdev,
			  u32 tiling_pipe_num,
			  u32 max_rb_num,
			  u32 total_max_rb_num,
			  u32 disabled_rb_mask)
{
	u32 rendering_pipe_num, rb_num_width, req_rb_num;
	u32 pipe_rb_ratio, pipe_rb_remain, tmp;
	u32 data = 0, mask = 1 << (max_rb_num - 1);

	/* mask out the RBs that don't exist on that asic */
	tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
	/* make sure at least one RB is available */
	if ((tmp & 0xff) != 0xff)
		disabled_rb_mask = tmp;

	rendering_pipe_num = 1 << tiling_pipe_num;
	req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
	BUG_ON(rendering_pipe_num < req_rb_num);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	if (rdev->family <= CHIP_RV740) {
		/* r6xx/r7xx */
		rb_num_width = 2;
	} else {
		/* eg+ */
		rb_num_width = 4;
	}

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= rb_num_width;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}

	return data;
}

int r600_count_pipe_bits(uint32_t val)
{
	return hweight32(val);
}
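
/*
 * r600_gpu_init programs the fixed-function GPU state at startup: the
 * per-family shader/backend limits, the tiling configuration derived from
 * RAMCFG, the SQ GPR/thread/stack resource split, and a set of default
 * render states that the 2D/3D drivers later adjust as needed.
 */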
static void r600_gpu_init(struct radeon_device *rdev)
{
	u32 cc_gc_shader_pipe_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;
	u32 disabled_rb_mask;

	rdev->config.r600.tiling_group_size = 256;
	switch (rdev->family) {
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;

		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;

		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;

		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
		tiling_config |= PIPE_TILING(0);
		tiling_config |= PIPE_TILING(1);
		tiling_config |= PIPE_TILING(2);
		tiling_config |= PIPE_TILING(3);
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);

	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
	tmp = rdev->config.r600.max_simds -
		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
	rdev->config.r600.active_simds = tmp;

	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
	tmp = 0;
	for (i = 0; i < rdev->config.r600.max_backends; i++)
		tmp |= (1 << i);
	/* if all the backends are disabled, fix it up here */
	if ((disabled_rb_mask & tmp) == tmp) {
		for (i = 0; i < rdev->config.r600.max_backends; i++)
			disabled_rb_mask &= ~(1 << i);
	}
	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
					R6XX_MAX_BACKENDS, disabled_rb_mask);
	tiling_config |= tmp << 16;
	rdev->config.r600.backend_map = tmp;

	rdev->config.r600.tile_config = tiling_config;
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
	sq_config |= (DX9_CONSTS |

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
		tmp = TC_L2_SIZE(8);
		tmp = TC_L2_SIZE(4);
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		tmp = TC_L2_SIZE(0);
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
	WREG32(VC_ENHANCE, 0);
}

/*
 * Indirect registers accessor
 */
u32
r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	spin_lock(&rdev->pciep_idx_lock);
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	spin_unlock(&rdev->pciep_idx_lock);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	spin_lock(&rdev->pciep_idx_lock);
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
	spin_unlock(&rdev->pciep_idx_lock);
}

void r600_cp_stop(struct radeon_device *rdev)
{
	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
}
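
/*
 * r600_init_microcode picks the PFP/ME/RLC (and, on rv770..hemlock parts,
 * SMC) firmware names for the detected family, requests the images as
 * radeonkmsfw_<chip>_<block>, and validates their sizes before use.
 */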
int r600_init_microcode(struct radeon_device *rdev)
{
	const char *chip_name;
	const char *rlc_chip_name;
	const char *smc_chip_name = "RV770";
	size_t pfp_req_size, me_req_size, rlc_req_size, smc_req_size = 0;

	switch (rdev->family) {
		rlc_chip_name = "R600";

		chip_name = "RV610";
		rlc_chip_name = "R600";

		chip_name = "RV630";
		rlc_chip_name = "R600";

		chip_name = "RV620";
		rlc_chip_name = "R600";

		chip_name = "RV635";
		rlc_chip_name = "R600";

		chip_name = "RV670";
		rlc_chip_name = "R600";

		chip_name = "RS780";
		rlc_chip_name = "R600";

		chip_name = "RV770";
		rlc_chip_name = "R700";
		smc_chip_name = "RV770";
		smc_req_size = ALIGN(RV770_SMC_UCODE_SIZE, 4);

		chip_name = "RV730";
		rlc_chip_name = "R700";
		smc_chip_name = "RV730";
		smc_req_size = ALIGN(RV730_SMC_UCODE_SIZE, 4);

		chip_name = "RV710";
		rlc_chip_name = "R700";
		smc_chip_name = "RV710";
		smc_req_size = ALIGN(RV710_SMC_UCODE_SIZE, 4);

		chip_name = "RV730";
		rlc_chip_name = "R700";
		smc_chip_name = "RV740";
		smc_req_size = ALIGN(RV740_SMC_UCODE_SIZE, 4);

		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		smc_chip_name = "CEDAR";
		smc_req_size = ALIGN(CEDAR_SMC_UCODE_SIZE, 4);

		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		smc_chip_name = "REDWOOD";
		smc_req_size = ALIGN(REDWOOD_SMC_UCODE_SIZE, 4);

		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		smc_chip_name = "JUNIPER";
		smc_req_size = ALIGN(JUNIPER_SMC_UCODE_SIZE, 4);

		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		smc_chip_name = "CYPRESS";
		smc_req_size = ALIGN(CYPRESS_SMC_UCODE_SIZE, 4);

		rlc_chip_name = "SUMO";

		rlc_chip_name = "SUMO";

		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = R600_PFP_UCODE_SIZE * 4;
		me_req_size = R600_PM4_UCODE_SIZE * 12;
		rlc_req_size = R600_RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_pfp", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
	if (rdev->pfp_fw->datasize != pfp_req_size) {
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->datasize, fw_name);
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_me", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
	if (rdev->me_fw->datasize != me_req_size) {
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->datasize, fw_name);
	}

	ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_rlc", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
	if (rdev->rlc_fw->datasize != rlc_req_size) {
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->datasize, fw_name);
	}

	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
		ksnprintf(fw_name, sizeof(fw_name), "radeonkmsfw_%s_smc", smc_chip_name);
		err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
		if (err) {
			       "smc: error loading firmware \"%s\"\n",
			release_firmware(rdev->smc_fw);
			rdev->smc_fw = NULL;
		} else if (rdev->smc_fw->datasize != smc_req_size) {
			       "smc: Bogus length %zu in firmware \"%s\"\n",
			       rdev->smc_fw->datasize, fw_name);
		}
	}

	if (err) {
		       "r600_cp: Failed to load firmware \"%s\"\n",
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	}
	return err;
}

u32
r600_gfx_get_rptr(struct radeon_device *rdev,
		  struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = rdev->wb.wb[ring->rptr_offs/4];
	else
		rptr = RREG32(R600_CP_RB_RPTR);

	return rptr;
}

u32
r600_gfx_get_wptr(struct radeon_device *rdev,
		  struct radeon_ring *ring)
{
	u32 wptr;

	wptr = RREG32(R600_CP_RB_WPTR);

	return wptr;
}

void r600_gfx_set_wptr(struct radeon_device *rdev,
		       struct radeon_ring *ring)
{
	WREG32(R600_CP_RB_WPTR, ring->wptr);
	(void)RREG32(R600_CP_RB_WPTR);
}

/**
 * r600_fini_microcode - drop the firmware image references
 *
 * @rdev: radeon_device pointer
 *
 * Drop the pfp, me and rlc firmware image references.
 * Called at driver shutdown.
 */
void r600_fini_microcode(struct radeon_device *rdev)
{
	release_firmware(rdev->pfp_fw);
	rdev->pfp_fw = NULL;
	release_firmware(rdev->me_fw);
	rdev->me_fw = NULL;
	release_firmware(rdev->rlc_fw);
	rdev->rlc_fw = NULL;
	release_firmware(rdev->smc_fw);
	rdev->smc_fw = NULL;
}
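
/*
 * The CP microcode is stored big-endian in the firmware images; the loader
 * below halts the CP, streams the ME words into CP_ME_RAM and the PFP words
 * into the PFP ucode RAM, then resets the load addresses.
 */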
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	WREG32(CP_RB_CNTL,
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R600_PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R600_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
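
/*
 * r600_cp_start emits the ME_INITIALIZE packet that brings the CP out of
 * halt, advertising the rv770 context count on rv770+ parts and the r600
 * count otherwise, before writing CP_ME_CNTL to release the ME.
 */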
int r600_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	r = radeon_ring_lock(rdev, ring, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(ring, 0x0);
		radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(ring, 0x3);
		radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring, false);

	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
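
/*
 * r600_cp_resume resets the CP, programs the ring buffer size, read/write
 * pointers and writeback addresses, points CP_RB_BASE at the ring BO, then
 * starts the CP and runs a ring test; on failure the ring is left not ready.
 */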
int r600_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
	tmp |= BUF_SWAP_32BIT;
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	r600_cp_start(rdev);
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	if (rdev->asic->copy.copy_ring_index == RADEON_RING_TYPE_GFX_INDEX)
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

	return 0;
}

void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	ring->ring_size = ring_size;
	ring->align_mask = 16 - 1;

	if (radeon_ring_supports_scratch_reg(rdev, ring)) {
		r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
		if (r) {
			DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
			ring->rptr_save_reg = 0;
		}
	}
}

void r600_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}

/*
 * GPU scratch registers helpers function.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}
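
/*
 * r600_ring_test seeds a scratch register with 0xCAFEDEAD, emits a
 * SET_CONFIG_REG packet that asks the CP to write 0xDEADBEEF to it, and
 * then polls the register to confirm the ring is actually executing.
 */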
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, ring, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(ring, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev, ring, false);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

/*
 * CP fences/semaphores
 */

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
		PACKET3_SH_ACTION_ENA;

	if (rdev->family >= CHIP_RV770)
		cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;

	if (rdev->wb.use_event) {
		u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, cp_coher_cntl);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(ring, lower_32_bits(addr));
		radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(ring, fence->seq);
		radeon_ring_write(ring, 0);
	} else {
		/* flush read cache over gart */
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, cp_coher_cntl);
		radeon_ring_write(ring, 0xFFFFFFFF);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 10); /* poll interval */
		radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(ring, RB_INT_STAT);
	}
}

/**
 * r600_semaphore_ring_emit - emit a semaphore on the CP ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon ring buffer object
 * @semaphore: radeon semaphore object
 * @emit_wait: Is this a semaphore wait?
 *
 * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP
 * from running ahead of semaphore waits.
 */
bool r600_semaphore_ring_emit(struct radeon_device *rdev,
			      struct radeon_ring *ring,
			      struct radeon_semaphore *semaphore,
			      bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;
	unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;

	if (rdev->family < CHIP_CAYMAN)
		sel |= PACKET3_SEM_WAIT_ON_SIGNAL;

	radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
	radeon_ring_write(ring, lower_32_bits(addr));
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);

	/* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
	if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
		/* Prevent the PFP from running ahead of the semaphore wait */
		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		radeon_ring_write(ring, 0x0);
	}

	return true;
}

/**
 * r600_copy_cpdma - copy pages using the CP DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU paging using the CP DMA engine (r6xx+).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int r600_copy_cpdma(struct radeon_device *rdev,
		    uint64_t src_offset, uint64_t dst_offset,
		    unsigned num_gpu_pages,
		    struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.blit_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes, tmp;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0x1fffff)
			cur_size_in_bytes = 0x1fffff;
		size_in_bytes -= cur_size_in_bytes;
		tmp = upper_32_bits(src_offset) & 0xff;
		if (size_in_bytes == 0)
			tmp |= PACKET3_CP_DMA_CP_SYNC;
		radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, tmp);
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, cur_size_in_bytes);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}
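
/*
 * r600_startup brings the ASIC up in dependency order: VRAM scratch and MC
 * programming, GART (or AGP), core GPU state, writeback, fence rings, the
 * interrupt controller, the CP ring and microcode, optional UVD, the IB
 * pool and finally audio.
 */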
static int r600_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	/* scratch needs to be initialized before MC */
	r = r600_vram_scratch_init(rdev);

	r600_mc_program(rdev);

	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
	}
	r600_gpu_init(rdev);

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	if (rdev->has_uvd) {
		r = uvd_v1_0_resume(rdev);
		if (!r) {
			r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
			if (r)
				dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
		}
		if (r)
			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
	}

	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
	}

	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,

	r = r600_cp_load_microcode(rdev);
	r = r600_cp_resume(rdev);

	if (rdev->has_uvd) {
		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
		if (ring->ring_size) {
			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
			r = uvd_v1_0_init(rdev);
			if (r)
				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
		}
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio init failed\n");
		return r;
	}

	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
	}
	WREG32(CONFIG_CNTL, temp);
}

int r600_resume(struct radeon_device *rdev)
{
	/* Do not reset GPU before posting, on r600 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);

	if (rdev->pm.pm_method == PM_METHOD_DPM)
		radeon_pm_resume(rdev);

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	radeon_pm_suspend(rdev);
	r600_audio_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_suspend(rdev);
	}
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);

	return 0;
}

/* Plan is to move initialization in that function and use
 * helper function so that radeon_device_init pretty much
 * do nothing more than calling asic specific function. This
 * should also allow to remove a bunch of callback function
 * like vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);

	r = radeon_fence_driver_init(rdev);

	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);

	/* Memory manager */
	r = radeon_bo_init(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	/* Initialize power management */
	radeon_pm_init(rdev);

	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);

	if (rdev->has_uvd) {
		r = radeon_uvd_init(rdev);
		if (!r) {
			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
			r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
		}
	}

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r600_audio_fini(rdev);
	r600_irq_fini(rdev);
	if (rdev->has_uvd) {
		uvd_v1_0_fini(rdev);
		radeon_uvd_fini(rdev);
	}
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	r600_fini_microcode(rdev);
}

void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (ring->rptr_save_reg) {
		next_rptr = ring->wptr + 3 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, ((ring->rptr_save_reg -
					  PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(ring, next_rptr);
	} else if (rdev->wb.enabled) {
		next_rptr = ring->wptr + 5 + 4;
		radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
		radeon_ring_write(ring, next_rptr);
		radeon_ring_write(ring, 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(ring, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
	struct radeon_ib ib;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib.ptr[2] = 0xDEADBEEF;
	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	}
	r = radeon_fence_wait(ib.fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_ib_free(rdev, &ib);
	radeon_scratch_free(rdev, scratch);
	return r;
}

/*
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and host consumes.  As the host irq handler processes interrupts, it
 * increments the rptr.  When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */

void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	/* Align ring size */
	rb_bufsz = order_base_2(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
}
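
/*
 * r600_ih_ring_alloc creates the interrupt ring as a buffer object in GTT,
 * pins it there and maps it so the host IRQ handler can read the entries
 * the GPU writes.
 */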
int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     RADEON_GEM_DOMAIN_GTT, 0,
				     NULL, &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0)) {
			radeon_bo_unref(&rdev->ih.ring_obj);
			return r;
		}
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			radeon_bo_unref(&rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		ring_ptr = &rdev->ih.ring;
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   ring_ptr);
		if (r) {
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			radeon_bo_unref(&rdev->ih.ring_obj);
			return r;
		}
	}
	return 0;
}

void r600_ih_ring_fini(struct radeon_device *rdev)
{
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
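
/*
 * r600_rlc_resume reloads the RLC microcode: it halts the RLC, clears its
 * control and handshake registers, streams the ucode words (r6xx or r7xx
 * sized) into RLC_UCODE_DATA and then re-enables the RLC.
 */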
static int r600_rlc_resume(struct radeon_device *rdev)
{
	const __be32 *fw_data;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_CNTL, 0);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < R600_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
	WREG32(DMA_CNTL, tmp);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		} else {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
			tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	r600_disable_interrupts(rdev);

	if (rdev->family >= CHIP_CEDAR)
		ret = evergreen_rlc_resume(rdev);
	else
		ret = r600_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = order_base_2(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* at this point everything should be setup correctly to enable master */
	pci_enable_busmaster(rdev->dev->bsddev);

	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
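
/*
 * r600_irq_set translates the driver's interrupt bookkeeping (ring, vblank,
 * hpd, hdmi and thermal sources in rdev->irq) into the per-block enable
 * masks and writes them out; with the IH disabled it only clears the state.
 */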
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 thermal_int = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	if (ASIC_IS_DCE3(rdev)) {
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
		} else {
			hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
			hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		}
	} else {
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
		hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;

	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		thermal_int = RREG32(CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	} else if (rdev->family >= CHIP_RV770) {
		thermal_int = RREG32(RV770_CG_THERMAL_INT) &
			~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
	}
	if (rdev->irq.dpm_thermal) {
		DRM_DEBUG("dpm thermal\n");
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
	}

	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}

	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
		DRM_DEBUG("r600_irq_set: sw int dma\n");
		dma_cntl |= TRAP_ENABLE;
	}

	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.afmt[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 0\n");
		hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}
	if (rdev->irq.afmt[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DMA_CNTL, dma_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
		} else {
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
			WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
		}
	} else {
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
		WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
		WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
	}
	if ((rdev->family > CHIP_R600) && (rdev->family < CHIP_RV770)) {
		WREG32(CG_THERMAL_INT, thermal_int);
	} else if (rdev->family >= CHIP_RV770) {
		WREG32(RV770_CG_THERMAL_INT, thermal_int);
	}

	return 0;
}
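
/*
 * r600_irq_ack latches the display, HPD and HDMI status registers into
 * rdev->irq.stat_regs and writes the acknowledge bits for every source
 * that is currently asserted.
 */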
static void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
		if (ASIC_IS_DCE32(rdev)) {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
		} else {
			rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
			rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
		}
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
		rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
		rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
			tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
			tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
			WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
		}
	} else {
		if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
			tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
			tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
			WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
			if (ASIC_IS_DCE3(rdev)) {
				tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
			} else {
				tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
				tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
				WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
			}
		}
	}
}
void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		wptr &= ~RB_OVERFLOW;
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catch up (a worked example follows
		 * this function).
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
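/*
 * Worked example of the overflow catch-up above, using illustrative
 * numbers only: with a 16 KiB IH ring the pointer mask is 0x3fff.  If an
 * overflow is reported while wptr sits at 0x0040, the oldest vector that
 * has not yet been overwritten is the one 16 bytes past wptr, so rptr is
 * reset to (0x0040 + 16) & 0x3fff = 0x0050 and processing resumes there,
 * wrapping around the ring whenever the sum exceeds the mask.
 */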
/*
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 vblank
 *      1         1  D1 vline
 *      5         0  D2 vblank
 *      5         1  D2 vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
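/*
 * Illustrative sketch only, kept out of the build with #if 0: decoding a
 * single IV ring entry according to the layout documented above.  The
 * helper name is hypothetical; the driver performs this decode inline in
 * r600_irq_process() below.
 */
#if 0
static void r600_decode_iv_entry_example(struct radeon_device *rdev,
					 u32 rptr, u32 *src_id, u32 *src_data)
{
	u32 ring_index = rptr / 4;	/* rptr is in bytes, the ring is u32 */

	/* dword 0, bits [7:0]: interrupt source id */
	*src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
	/* dword 1, bits [27:0]: interrupt source data (bits [59:32] of the entry) */
	*src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
}
#endif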
irqreturn_t r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	bool queue_hotplug = false;
	bool queue_hdmi = false;
	bool queue_thermal = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	/* No MSIs, need a dummy read to flush PCI DMAs */
	if (!rdev->msi_enabled)
		RREG32(IH_RB_WPTR);

	wptr = r600_get_ih_wptr(rdev);

restart_ih:
	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG_VBLANK("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */
	rmb();

	/* display interrupts */
	r600_irq_ack(rdev);

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_vblank(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG_VBLANK("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG_VBLANK("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_vblank(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG_VBLANK("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG_VBLANK("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 9: /* D1 pflip */
			DRM_DEBUG_VBLANK("IH: D1 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 0);
			break;
		case 11: /* D2 pflip */
			DRM_DEBUG_VBLANK("IH: D2 flip\n");
			if (radeon_use_pflipirq > 0)
				radeon_crtc_handle_flip(rdev, 1);
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* hdmi */
			switch (src_data) {
			case 4:
				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI0\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
					queue_hdmi = true;
					DRM_DEBUG("IH: HDMI1\n");
				}
				break;
			default:
				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 124: /* UVD */
			DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 224: /* DMA trap event */
			DRM_DEBUG("IH: DMA trap\n");
			radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
			break;
		case 230: /* thermal low to high */
			DRM_DEBUG("IH: thermal low to high\n");
			rdev->pm.dpm.thermal.high_to_low = false;
			queue_thermal = true;
			break;
		case 231: /* thermal high to low */
			DRM_DEBUG("IH: thermal high to low\n");
			rdev->pm.dpm.thermal.high_to_low = true;
			queue_thermal = true;
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
		WREG32(IH_RB_RPTR, rptr);
	}
	if (queue_hotplug)
		taskqueue_enqueue(rdev->tq, &rdev->hotplug_work);
	if (queue_hdmi)
		taskqueue_enqueue(rdev->tq, &rdev->audio_work);
	if (queue_thermal && rdev->pm.dpm_enabled)
		taskqueue_enqueue(rdev->tq, &rdev->pm.dpm.thermal.work);
	rdev->ih.rptr = rptr;
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rptr)
		goto restart_ih;

	return IRQ_HANDLED;
}
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
/**
 * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
 * @rdev: radeon device structure
 *
 * Some R6XX/R7XX don't seem to take into account HDP flushes performed
 * through the ring buffer. This leads to corruption in rendering, see
 * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_mmio_hdp_flush(struct radeon_device *rdev)
{
	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		volatile uint32_t *ptr = rdev->vram_scratch.ptr;

		WREG32(HDP_DEBUG1, 0);
		(void)*ptr;	/* dummy read from VRAM to flush the HDP cache */
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	radeon_gui_idle(rdev);

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		/* not actually supported */
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	default:
		DRM_ERROR("invalid pcie lane request: %d\n", lanes);
		return;
	}

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	link_width_cntl &= ~RADEON_PCIE_LC_LINK_WIDTH_MASK;
	link_width_cntl |= mask << RADEON_PCIE_LC_LINK_WIDTH_SHIFT;
	link_width_cntl |= (RADEON_PCIE_LC_RECONFIG_NOW |
			    R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);

	WREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	radeon_gui_idle(rdev);

	link_width_cntl = RREG32_PCIE_PORT(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X12:
		/* not actually supported */
		return 12;
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}
static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;
	u32 mask;
	int ret;

	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & DRM_PCIE_SPEED_50))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (speed_cntl & LC_CURRENT_DATA_RATE) {
		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
		return;
	}

	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_PORT(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}
/**
 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (R6xx-cayman).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
{
	uint64_t clock;

	spin_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	spin_unlock(&rdev->gpu_clock_mutex);
	return clock;
}
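/*
 * Illustrative sketch only, kept out of the build with #if 0: measuring how
 * many GPU clock ticks elapse across a stretch of driver code by taking two
 * counter snapshots.  The helper name and the 100 ms delay are arbitrary
 * example values, not part of the driver.
 */
#if 0
static void r600_gpu_clock_delta_example(struct radeon_device *rdev)
{
	uint64_t start, end;

	start = r600_get_gpu_clock_counter(rdev);
	mdelay(100);
	end = r600_get_gpu_clock_counter(rdev);
	DRM_INFO("GPU clock ticks elapsed: %llu\n",
		 (unsigned long long)(end - start));
}
#endif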