/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "radeon_reg.h"
#include "atom-bits.h"

/* rs690,rs740 depends on : */
void r100_hdp_reset(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
void r420_pipes_init(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs600_mc_disable_clients(struct radeon_device *rdev);
void rs600_disable_vga(struct radeon_device *rdev);
/* This file gathers functions specific to rs690/rs740.
 * Some of these functions might be used by newer ASICs.
 */
void rs690_gpu_init(struct radeon_device *rdev);
int rs690_mc_wait_for_idle(struct radeon_device *rdev);

int rs690_mc_init(struct radeon_device *rdev)
{
	uint32_t tmp;
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}
	rs400_gart_disable(rdev);
	/* Setup GPU memory space */
	rdev->mc.gtt_location = rdev->mc.mc_vram_size;
	rdev->mc.gtt_location += (rdev->mc.gtt_size - 1);
	rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1);
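	/* Illustrative example of the alignment above: with mc_vram_size =
	 * 256MB and gtt_size = 512MB, gtt_location becomes 0x10000000, then
	 * 0x2FFFFFFF, then 0x20000000, i.e. rounded up to the next gtt_size
	 * boundary so the GTT aperture does not overlap VRAM.
	 */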
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}
	/* Program GPU memory space */
	rs600_mc_disable_clients(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16);
	tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16);
	WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp);
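	/* MC_FB_START/MC_FB_TOP take addresses shifted right by 16, so the
	 * framebuffer range is described to the MC at 64KB granularity.
	 */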
	/* FIXME: Does this reg exist on RS480,RS740 ? */
	WREG32(0x310, rdev->mc.vram_location);
	WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
	return 0;
}

void rs690_mc_fini(struct radeon_device *rdev)
{
}

/*
 * Global GPU functions
 */
int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32_MC(RS690_MC_STATUS);
		if (tmp & RS690_MC_STATUS_IDLE) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}

void rs690_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;
}

void rs690_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: HDP same place on rs690 ? */
	r100_hdp_reset(rdev);
	rs600_disable_vga(rdev);
	/* FIXME: is this correct ? */
	r420_pipes_init(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

void rs690_pm_info(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
	void *ptr;
	uint16_t data_offset;
	uint8_t frev, crev;
	fixed20_12 tmp;

	atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
			       &frev, &crev, &data_offset);
	ptr = rdev->mode_info.atom_context->bios + data_offset;
	info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
	info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
	/* Get various system information from the BIOS */
	switch (crev) {
	case 1:
		tmp.full = rfixed_const(100);
		rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
		rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
		rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
		rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
		rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
		break;
	case 2:
		tmp.full = rfixed_const(100);
		rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
		rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
		rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
		rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
		rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
		rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
		rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
		break;
	default:
		tmp.full = rfixed_const(100);
		/* We assume the slowest possible clock, i.e. the worst case */
		rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
		/* FIXME: system clock ? */
		rdev->pm.igp_system_mclk.full = rfixed_const(100);
		rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
		rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
		rdev->pm.igp_ht_link_width.full = rfixed_const(8);
		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
		break;
	}
	/* Compute various bandwidths */
	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
	tmp.full = rfixed_const(4);
	rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp);
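	/* Illustrative: a 400MHz system memory clock yields
	 * k8_bandwidth = 400 * 4 = 1600 (MB/s, given the 64-bit bus and 50%
	 * efficiency assumed by the formula above).
	 */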
	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
	 *              = ht_clk * ht_width / 5
	 */
	tmp.full = rfixed_const(5);
	rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk,
						rdev->pm.igp_ht_link_width);
	rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp);
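	/* Illustrative: a 1000MHz, 16-bit HT link yields
	 * ht_bandwidth = 1000 * 16 / 5 = 3200 (double data rate at 80%
	 * efficiency, per the formula above).
	 */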
	if (tmp.full < rdev->pm.max_bandwidth.full) {
		/* HT link is a limiting factor */
		rdev->pm.max_bandwidth.full = tmp.full;
	}
	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
	 *                    = (sideport_clk * 14) / 10
	 */
	tmp.full = rfixed_const(14);
	rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
	tmp.full = rfixed_const(10);
	rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
}

void rs690_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;
	fixed20_12 a;

	rs400_gart_adjust_size(rdev);
	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	/* FIXME: is this correct for RS690/RS740 ? */
	tmp = RREG32(RADEON_MEM_CNTL);
	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
		rdev->mc.vram_width = 128;
	} else {
		rdev->mc.vram_width = 64;
	}
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	a.full = rfixed_const(16);
	/* core_bandwidth = sclk(Mhz) * 16 */
	rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
}

void rs690_line_buffer_adjust(struct radeon_device *rdev,
			      struct drm_display_mode *mode1,
			      struct drm_display_mode *mode2)
{
	u32 tmp;

	/*
	 * There is a single line buffer shared by both display controllers.
	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning can either be done
	 * manually or via one of four preset allocations specified in bits 1:0:
	 *  0 - line buffer is divided in half and shared between crtc
	 *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
	 *  2 - D1 gets the whole buffer
	 *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
	 * Setting bit 2 of DC_LB_MEMORY_SPLIT switches to manual
	 * allocation mode. In manual allocation mode, D1 always starts at 0,
	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
	 */
	tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK;
	tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE;
	if (mode1 && mode2) {
		if (mode1->hdisplay > mode2->hdisplay) {
			if (mode1->hdisplay > 2560)
				tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
			else
				tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else if (mode2->hdisplay > mode1->hdisplay) {
			if (mode2->hdisplay > 2560)
				tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
			else
				tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else
			tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
	} else if (mode1) {
		tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY;
	} else if (mode2) {
		tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
	}
	WREG32(DC_LB_MEMORY_SPLIT, tmp);
}
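
/* Per-CRTC display watermark parameters; rs690_crtc_bandwidth_compute()
 * fills one of these per controller and rs690_bandwidth_update() turns
 * them into the priority marks written to DxMODE_PRIORITY_x_CNT.
 */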
struct rs690_watermark {
	u32        lb_request_fifo_depth;
	fixed20_12 num_line_pair;
	fixed20_12 estimated_width;
	fixed20_12 worst_case_latency;
	fixed20_12 consumption_rate;
	fixed20_12 active_time;
	fixed20_12 dbpp;
	fixed20_12 priority_mark_max;
	fixed20_12 priority_mark;
	fixed20_12 sclk;
};

void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
				  struct radeon_crtc *crtc,
				  struct rs690_watermark *wm)
{
	struct drm_display_mode *mode = &crtc->base.mode;
	fixed20_12 a, b, c;
	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
	/* FIXME: detect IGP with sideport memory, i don't think there is any
	 * such product available
	 */
	bool sideport = false;

	if (!crtc->base.enabled) {
		/* FIXME: wouldn't it be better to set priority mark to maximum */
		wm->lb_request_fifo_depth = 4;
		return;
	}
	if (crtc->vsc.full > rfixed_const(2))
		wm->num_line_pair.full = rfixed_const(2);
	else
		wm->num_line_pair.full = rfixed_const(1);

	b.full = rfixed_const(mode->crtc_hdisplay);
	c.full = rfixed_const(256);
	a.full = rfixed_mul(wm->num_line_pair, b);
	request_fifo_depth.full = rfixed_div(a, c);
	if (a.full < rfixed_const(4)) {
		wm->lb_request_fifo_depth = 4;
	} else {
		wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth);
	}
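	/* Illustrative: a 1920 pixel wide mode with one line pair gives
	 * request_fifo_depth = 1920 / 256 = 7.5, so lb_request_fifo_depth
	 * is truncated to 7.
	 */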
	/* Determine consumption rate
	 *  pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000)
	 *  vtaps = number of vertical taps,
	 *  vsc = vertical scaling ratio, defined as source/destination
	 *  hsc = horizontal scaling ratio, defined as source/destination
	 */
	a.full = rfixed_const(mode->clock);
	b.full = rfixed_const(1000);
	a.full = rfixed_div(a, b);
	pclk.full = rfixed_div(b, a);
	if (crtc->rmx_type != RMX_OFF) {
		b.full = rfixed_const(2);
		if (crtc->vsc.full > b.full)
			b.full = crtc->vsc.full;
		b.full = rfixed_mul(b, crtc->hsc);
		c.full = rfixed_const(2);
		b.full = rfixed_div(b, c);
		consumption_time.full = rfixed_div(pclk, b);
	} else {
		consumption_time.full = pclk.full;
	}
	a.full = rfixed_const(1);
	wm->consumption_rate.full = rfixed_div(a, consumption_time);
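	/* Illustrative: mode->clock = 148500 (148.5MHz) gives
	 * pclk = 1000 / 148.5 ~ 6.73ns per pixel, so without scaling the
	 * consumption rate is roughly 0.15 pixels per ns.
	 */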
	/* Determine line time
	 *  LineTime = total time for one line of display
	 *  hTotal = total number of horizontal pixels
	 *  pclk = pixel clock period(ns)
	 */
	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
	line_time.full = rfixed_mul(a, pclk);
	/* Determine active time
	 *  ActiveTime = time of active region of display within one line,
	 *  hactive = total number of horizontal active pixels
	 *  htotal = total number of horizontal pixels
	 */
	a.full = rfixed_const(crtc->base.mode.crtc_htotal);
	b.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->active_time.full = rfixed_mul(line_time, b);
	wm->active_time.full = rfixed_div(wm->active_time, a);
	/* Maximum bandwidth is the minimum bandwidth of all components */
	rdev->pm.max_bandwidth = rdev->pm.core_bandwidth;
	if (sideport) {
		if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
			rdev->pm.sideport_bandwidth.full)
			rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
		read_delay_latency.full = rfixed_const(370 * 800 * 1000);
		read_delay_latency.full = rfixed_div(read_delay_latency,
			rdev->pm.igp_sideport_mclk);
	} else {
		if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
			rdev->pm.k8_bandwidth.full)
			rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth;
		if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
			rdev->pm.ht_bandwidth.full)
			rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth;
		read_delay_latency.full = rfixed_const(5000);
	}
	/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
	a.full = rfixed_const(16);
	rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a);
	a.full = rfixed_const(1000);
	rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk);
	/* Determine chunk time
	 * ChunkTime = the time it takes the DCP to send one chunk of data
	 * to the LB which consists of pipeline delay and inter chunk gap
	 * sclk = system clock(ns)
	 */
	a.full = rfixed_const(256 * 13);
	chunk_time.full = rfixed_mul(rdev->pm.sclk, a);
	a.full = rfixed_const(10);
	chunk_time.full = rfixed_div(chunk_time, a);
	/* Determine the worst case latency
	 * NumLinePair = Number of line pairs to request (1=2 lines, 2=4 lines)
	 * WorstCaseLatency = worst case time from urgent to when the MC starts
	 *                    to return data
	 * READ_DELAY_IDLE_MAX = constant of 1us
	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
	 *             which consists of pipeline delay and inter chunk gap
	 */
	if (rfixed_trunc(wm->num_line_pair) > 1) {
		a.full = rfixed_const(3);
		wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	} else {
		a.full = rfixed_const(2);
		wm->worst_case_latency.full = rfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	}
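	/* In short: worst_case_latency = (2 or 3) * chunk_time +
	 * read_delay_latency, depending on whether one or two line pairs
	 * are requested.
	 */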
	/* Determine the tolerable latency
	 * TolerableLatency = Any given request has only 1 line time
	 *                    for the data to be returned
	 * LBRequestFifoDepth = Number of chunk requests the LB can
	 *                      put into the request FIFO for a display
	 * LineTime = total time for one line of display
	 * ChunkTime = the time it takes the DCP to send one chunk
	 *             of data to the LB which consists of
	 *             pipeline delay and inter chunk gap
	 */
	if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) {
		tolerable_latency.full = line_time.full;
	} else {
		tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2);
		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
		tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time);
		tolerable_latency.full = line_time.full - tolerable_latency.full;
	}
	/* We assume worst case 32bits (4 bytes) */
	wm->dbpp.full = rfixed_const(4 * 8);
	/* Determine the maximum priority mark
	 *  width = viewport width in pixels
	 */
	a.full = rfixed_const(16);
	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
	/* Determine estimated width */
	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
	estimated_width.full = rfixed_div(estimated_width, consumption_time);
	if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
		wm->priority_mark.full = rfixed_const(10);
	} else {
		a.full = rfixed_const(16);
		wm->priority_mark.full = rfixed_div(estimated_width, a);
		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
	}
}

void rs690_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	struct rs690_watermark wm0;
	struct rs690_watermark wm1;
	u32 tmp;
	fixed20_12 priority_mark02, priority_mark12, fill_rate;
	fixed20_12 a, b;

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	/*
	 * Set display0/1 priority up in the memory controller for
	 * modes if the user specifies HIGH for displaypriority
	 * option.
	 */
	if (rdev->disp_priority == 2) {
		tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER);
		tmp &= ~MC_DISP1R_INIT_LAT_MASK;
		tmp &= ~MC_DISP0R_INIT_LAT_MASK;
		if (mode1)
			tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT);
		if (mode0)
			tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT);
		WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp);
	}
	rs690_line_buffer_adjust(rdev, mode0, mode1);
	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
		WREG32(DCP_CONTROL, 0);
	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
		WREG32(DCP_CONTROL, 2);
	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0);
	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1);
	tmp = (wm0.lb_request_fifo_depth - 1);
	tmp |= (wm1.lb_request_fifo_depth - 1) << 16;
	WREG32(LB_MAX_REQ_OUTSTANDING, tmp);
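	/* Both FIFO depths are packed into one register: CRTC0 in the low
	 * 16 bits, CRTC1 in the high 16 bits, each stored as depth - 1.
	 */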
	if (mode0 && mode1) {
		if (rfixed_trunc(wm0.dbpp) > 64)
			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		if (rfixed_trunc(wm1.dbpp) > 64)
			b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
		else
			b.full = wm1.num_line_pair.full;
		a.full += b.full;
		fill_rate.full = rfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm0.active_time);
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		}
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm1.active_time);
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (rfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (rfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
	} else if (mode0) {
		if (rfixed_trunc(wm0.dbpp) > 64)
			a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
		else
			a.full = wm0.num_line_pair.full;
		fill_rate.full = rfixed_div(wm0.sclk, a);
		if (wm0.consumption_rate.full > fill_rate.full) {
			b.full = wm0.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm0.active_time);
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm0.worst_case_latency,
						wm0.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark02.full = rfixed_div(a, b);
		}
		if (wm0.priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark.full;
		if (rfixed_trunc(priority_mark02) < 0)
			priority_mark02.full = 0;
		if (wm0.priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0.priority_mark_max.full;
		WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
		WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
		WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
		WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
	} else {
		if (rfixed_trunc(wm1.dbpp) > 64)
			a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair);
		else
			a.full = wm1.num_line_pair.full;
		fill_rate.full = rfixed_div(wm1.sclk, a);
		if (wm1.consumption_rate.full > fill_rate.full) {
			b.full = wm1.consumption_rate.full - fill_rate.full;
			b.full = rfixed_mul(b, wm1.active_time);
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			a.full = a.full + b.full;
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		} else {
			a.full = rfixed_mul(wm1.worst_case_latency,
						wm1.consumption_rate);
			b.full = rfixed_const(16 * 1000);
			priority_mark12.full = rfixed_div(a, b);
		}
		if (wm1.priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark.full;
		if (rfixed_trunc(priority_mark12) < 0)
			priority_mark12.full = 0;
		if (wm1.priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1.priority_mark_max.full;
		WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
		WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
		WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
		WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
	}
}

/*
 * Indirect registers accessor
 */
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK));
	r = RREG32(RS690_MC_DATA);
	WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
	return r;
}
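
/* The MC register block is accessed indirectly: the register offset is
 * programmed into RS690_MC_INDEX (with WR_EN set for writes) and the data
 * is then transferred through RS690_MC_DATA.
 */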
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RS690_MC_INDEX,
	       RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK));
	WREG32(RS690_MC_DATA, v);
	WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);
}