/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include "intel_drv.h"

#include <linux/kernel.h>
#include <machine/clock.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 2
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.i915_enable_fbc parameter.
 */
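
/*
 * Editor's illustration (not text from the hardware documentation): with FBC
 * active, the display engine scans out compressed lines from a separate
 * compressed framebuffer instead of re-reading the full-size scanout on each
 * refresh, so a static desktop over a solid background compresses well and
 * saves memory bandwidth, while high-entropy content compresses poorly and
 * gains little.
 */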
static bool intel_crtc_active(struct drm_crtc *crtc)
{
	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 */
	return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
}
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}

	kfree(work, M_DRM);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc_work, M_DRM);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kmalloc(sizeof(*work), M_DRM, M_WAITOK | M_ZERO);
	if (work == NULL) {
		dev_priv->display.enable_fbc(crtc, interval);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(tmp_crtc) &&
		    !to_intel_crtc(tmp_crtc)->primary_disabled) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
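
/*
 * Each row below initializes one struct cxsr_latency; the field order is
 * assumed (from the lookup in intel_get_cxsr_latency() and the uses of
 * latency->display_sr etc. in pineview_update_wm()) to be:
 *   { is_desktop, is_ddr3, fsb_freq, mem_freq,
 *     display_sr, display_hpll_disable, cursor_sr, cursor_hpll_disable }
 * with the four latencies in nanoseconds.
 */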
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};

static const struct intel_watermark_params ironlake_display_wm_info = {
	ILK_DISPLAY_FIFO,
	ILK_DISPLAY_MAXWM,
	ILK_DISPLAY_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
	ILK_CURSOR_FIFO,
	ILK_CURSOR_MAXWM,
	ILK_CURSOR_DFTWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
	ILK_DISPLAY_SR_FIFO,
	ILK_DISPLAY_MAX_SRWM,
	ILK_DISPLAY_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
	ILK_CURSOR_SR_FIFO,
	ILK_CURSOR_MAX_SRWM,
	ILK_CURSOR_DFT_SRWM,
	2,
	ILK_FIFO_LINE_SIZE
};

static const struct intel_watermark_params sandybridge_display_wm_info = {
	SNB_DISPLAY_FIFO,
	SNB_DISPLAY_MAXWM,
	SNB_DISPLAY_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
	SNB_CURSOR_FIFO,
	SNB_CURSOR_MAXWM,
	SNB_CURSOR_DFTWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
	SNB_DISPLAY_SR_FIFO,
	SNB_DISPLAY_MAX_SRWM,
	SNB_DISPLAY_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
	SNB_CURSOR_SR_FIFO,
	SNB_CURSOR_MAX_SRWM,
	SNB_CURSOR_DFT_SRWM,
	2,
	SNB_FIFO_LINE_SIZE
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized based chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
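
/*
 * Worked example with made-up numbers (not from any platform table):
 * clock_in_khz = 108000 (108 MHz), pixel_size = 4, latency_ns = 5000 and a
 * 64-byte cacheline give
 *   entries_required = ((108000 / 1000) * 4 * 5000) / 1000 = 2160 bytes
 *                    = DIV_ROUND_UP(2160, 64) = 34 cachelines,
 * so a hypothetical fifo_size of 96 with guard_size 2 would yield
 *   wm_size = 96 - (34 + 2) = 60.
 */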
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		int clock = crtc->mode.clock;
		int pixel_size = crtc->fb->bits_per_pixel / 8;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	htotal = crtc->mode.htotal;
	hdisplay = crtc->mode.hdisplay;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = ((htotal * 1000) / clock);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * 64 * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
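
/*
 * Summary of the two methods used above (editor's note, not spec text): the
 * "small buffer" method sizes a watermark from the raw drain rate (bytes
 * consumed during the latency window), while the "large buffer" method sizes
 * it from whole scanlines fetched during that window; g4x_compute_wm0() uses
 * the former for the plane and the latter for the cursor.
 */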
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = crtc->mode.clock;	/* VESA DOT Clock */
	pixel_size = crtc->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
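
/*
 * Illustrative numbers (hypothetical mode, and taking
 * DRAIN_LATENCY_PRECISION_32 as the value 32): a 266 MHz dot clock at
 * 4 bytes per pixel gives entries = (266000 / 1000) * 4 = 1064 > 256, so the
 * 32x precision multiplier is chosen and
 *   plane_dl = (64 * 32 * 4) / ((266000 / 1000) * 4) = 8192 / 1064 = 7.
 */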
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
				(cursora_dl << DDL_CURSORA_SHIFT) |
				planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
				(cursorb_dl << DDL_CURSORB_SHIFT) |
				planeb_prec | planeb_dl);
	}
}
#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
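
/*
 * single_plane_enabled() is true only when exactly one bit of the mask is
 * set: 0x1 (pipe A alone) and 0x2 (pipe B alone) qualify, 0x0 and 0x3 do not.
 */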
static void valleyview_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, 0,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	} else {
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void g4x_update_wm(struct drm_device *dev)
{
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, 0,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1;

	if (g4x_compute_wm0(dev, 1,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 2;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i965_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		int clock = crtc->mode.clock;
		int htotal = crtc->mode.htotal;
		int hdisplay = crtc->mode.hdisplay;
		int pixel_size = crtc->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = ((htotal * 1000) / clock);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * 64;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i9xx_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i855_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		int cpp = crtc->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		planea_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		int cpp = crtc->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		planeb_wm = intel_calculate_wm(crtc->mode.clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		int clock = enabled->mode.clock;
		int htotal = enabled->mode.htotal;
		int hdisplay = enabled->mode.hdisplay;
		int pixel_size = enabled->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = (htotal * 1000) / clock;

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
static void i830_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
#define ILK_LP0_PLANE_LATENCY		700
#define ILK_LP0_CURSOR_LATENCY		1300

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool ironlake_check_srwm(struct drm_device *dev, int level,
				int fbc_wm, int display_wm, int cursor_wm,
				const struct intel_watermark_params *display,
				const struct intel_watermark_params *cursor)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);

	if (fbc_wm > SNB_FBC_MAX_SRWM) {
		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
			      fbc_wm, SNB_FBC_MAX_SRWM, level);

		/* fbc has its own way to disable FBC WM */
		I915_WRITE(DISP_ARB_CTL,
			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
		return false;
	}

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
		return false;
	}

	if (!(fbc_wm || display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
		return false;
	}

	return true;
}
/*
 * Compute watermark values of WM[1-3],
 */
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
				  int latency_ns,
				  const struct intel_watermark_params *display,
				  const struct intel_watermark_params *cursor,
				  int *fbc_wm, int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int hdisplay, htotal, pixel_size, clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*fbc_wm = *display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	hdisplay = crtc->mode.hdisplay;
	htotal = crtc->mode.htotal;
	clock = crtc->mode.clock;
	pixel_size = crtc->fb->bits_per_pixel / 8;

	line_time_us = (htotal * 1000) / clock;
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/*
	 * Spec says:
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
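
	/*
	 * Hypothetical example: with *display_wm = 45 and a 1920-pixel-wide,
	 * 4-byte-per-pixel scanline (line_size = 7680), this gives
	 * *fbc_wm = DIV_ROUND_UP(45 * 64, 7680) + 2 = 1 + 2 = 3.
	 */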

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * 64;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return ironlake_check_srwm(dev, level,
				   *fbc_wm, *display_wm, *cursor_wm,
				   display, cursor);
}
static void ironlake_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEA_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &ironlake_display_wm_info,
			    ILK_LP0_PLANE_LATENCY,
			    &ironlake_cursor_wm_info,
			    ILK_LP0_CURSOR_LATENCY,
			    &plane_wm, &cursor_wm)) {
		I915_WRITE(WM0_PIPEB_ILK,
			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled))
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   ILK_READ_WM1_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   ILK_READ_WM2_LATENCY() * 500,
				   &ironlake_display_srwm_info,
				   &ironlake_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/*
	 * WM3 is unsupported on ILK, probably because we don't have latency
	 * data for that power state
	 */
}
static void sandybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB supports 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3 */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
static void ivybridge_update_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int fbc_wm, plane_wm, cursor_wm;
	int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
	unsigned int enabled;

	enabled = 0;
	if (g4x_compute_wm0(dev, 0,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEA_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEA_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
			      " plane %d, " "cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 1;
	}

	if (g4x_compute_wm0(dev, 1,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEB_ILK);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEB_ILK, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 2;
	}

	if (g4x_compute_wm0(dev, 2,
			    &sandybridge_display_wm_info, latency,
			    &sandybridge_cursor_wm_info, latency,
			    &plane_wm, &cursor_wm)) {
		val = I915_READ(WM0_PIPEC_IVB);
		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
		I915_WRITE(WM0_PIPEC_IVB, val |
			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
		DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
			      " plane %d, cursor: %d\n",
			      plane_wm, cursor_wm);
		enabled |= 4;
	}

	/*
	 * Calculate and update the self-refresh watermark only when one
	 * display plane is used.
	 *
	 * SNB supports 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
	 *
	 */
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	if (!single_plane_enabled(enabled) ||
	    dev_priv->sprite_scaling_enabled)
		return;
	enabled = ffs(enabled) - 1;

	/* WM1 */
	if (!ironlake_compute_srwm(dev, 1, enabled,
				   SNB_READ_WM1_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM1_LP_ILK,
		   WM1_LP_SR_EN |
		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM2 */
	if (!ironlake_compute_srwm(dev, 2, enabled,
				   SNB_READ_WM2_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM2_LP_ILK,
		   WM2_LP_EN |
		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);

	/* WM3, note we have to correct the cursor latency */
	if (!ironlake_compute_srwm(dev, 3, enabled,
				   SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
	    !ironlake_compute_srwm(dev, 3, enabled,
				   2 * SNB_READ_WM3_LATENCY() * 500,
				   &sandybridge_display_srwm_info,
				   &sandybridge_cursor_srwm_info,
				   &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
		return;

	I915_WRITE(WM3_LP_ILK,
		   WM3_LP_EN |
		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
		   (fbc_wm << WM1_LP_FBC_SHIFT) |
		   (plane_wm << WM1_LP_SR_SHIFT) |
		   cursor_wm);
}
static void
haswell_update_linetime_wm(struct drm_device *dev, int pipe,
			   struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 temp;

	temp = I915_READ(PIPE_WM_LINETIME(pipe));
	temp &= ~PIPE_WM_LINETIME_MASK;

	/* The WM are computed with base on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	temp |= PIPE_WM_LINETIME_TIME(
		((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
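
	/*
	 * Example with made-up mode timings: a 1920-wide mode on a 148500 kHz
	 * dot clock yields ((1920 * 1000) / 148500) * 8 = 12 * 8 = 96 in
	 * integer math.
	 */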

	/* IPS watermarks are only used by pipe A, and are ignored by
	 * pipes B and C.  They are calculated similarly to the common
	 * linetime values, except that we are using CD clock frequency
	 * in MHz instead of pixel rate for the division.
	 *
	 * This is a placeholder for the IPS watermark calculation code.
	 */

	I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
}
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
			      uint32_t sprite_width, int pixel_size,
			      const struct intel_watermark_params *display,
			      int display_latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	int clock;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*sprite_wm = display->guard_size;
		return false;
	}

	clock = crtc->mode.clock;

	/* Use the small buffer method to calculate the sprite watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size -
		sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*sprite_wm = entries + display->guard_size;
	if (*sprite_wm > (int)display->max_wm)
		*sprite_wm = display->max_wm;

	return true;
}
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
				uint32_t sprite_width, int pixel_size,
				const struct intel_watermark_params *display,
				int latency_ns, int *sprite_wm)
{
	struct drm_crtc *crtc;
	unsigned long line_time_us;
	int clock;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*sprite_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	clock = crtc->mode.clock;
	if (!clock) {
		*sprite_wm = 0;
		return false;
	}

	line_time_us = (sprite_width * 1000) / clock;
	if (!line_time_us) {
		*sprite_wm = 0;
		return false;
	}

	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = sprite_width * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*sprite_wm = entries + display->guard_size;

	return *sprite_wm <= 0x3ff;
}
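/*
 * Worked example of the small/large comparison above (hypothetical
 * numbers, for illustration only): with sprite_width = 256, clock =
 * 148500 kHz and latency_ns = 2000, line_time_us = (256 * 1000) /
 * 148500 = 1 and line_count = (2000 / 1 + 1000) / 1000 = 3.  At
 * pixel_size = 4 the large method gives 3 * 256 * 4 = 3072 bytes while
 * the small method gives ((148500 * 4 / 1000) * 2000) / 1000 = 1188
 * bytes, so the smaller 1188 is divided into cachelines for the
 * watermark.
 */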
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
					 uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
	u32 val;
	int sprite_wm, reg;
	int ret;

	switch (pipe) {
	case 0:
		reg = WM0_PIPEA_ILK;
		break;
	case 1:
		reg = WM0_PIPEB_ILK;
		break;
	case 2:
		reg = WM0_PIPEC_IVB;
		break;
	default:
		return; /* bad pipe */
	}

	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
					    &sandybridge_display_wm_info,
					    latency, &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
			      pipe);
		return;
	}

	val = I915_READ(reg);
	val &= ~WM0_PIPE_SPRITE_MASK;
	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
	DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM1_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM1S_LP_ILK, sprite_wm);

	/* Only IVB has two more LP watermarks for sprite */
	if (!IS_IVYBRIDGE(dev))
		return;

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM2_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM2S_LP_IVB, sprite_wm);

	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
					      pixel_size,
					      &sandybridge_display_srwm_info,
					      SNB_READ_WM3_LATENCY() * 500,
					      &sprite_wm);
	if (!ret) {
		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
			      pipe);
		return;
	}
	I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 *   The final value programmed to the register should always be rounded up,
 *   and include an extra 2 entries to account for clock crossings.
 *
 *   We don't use the sprite, so we can ignore that.  And on Crestline we have
 *   to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}
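/*
 * Worked example of the normal method documented above (hypothetical
 * numbers, for illustration only): a 148500 kHz dot clock at 4 bytes per
 * pixel drains the FIFO at roughly 594 bytes/us, so a 12 us latency
 * requires about 594 * 12 = 7128 bytes of headroom before rounding up
 * and adding the two extra entries for clock crossings.
 */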
void intel_update_linetime_watermarks(struct drm_device *dev,
		int pipe, struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_linetime_wm)
		dev_priv->display.update_linetime_wm(dev, pipe, mode);
}

void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
						   pixel_size);
}
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	DRM_LOCK_ASSERT(dev);

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_object_pin(ctx, 4096, true, false);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	return NULL;
}
/**
 * Lock protecting IPS related data structures
 */
struct lock mchdev_lock;
LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
struct drm_i915_private *i915_mch_dev;

bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	nanotime(&dev_priv->ips.last_time2);

	lockmgr(&mchdev_lock, LK_RELEASE);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);

	lockmgr(&mchdev_lock, LK_RELEASE);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
{
	u32 limits = 0;

	if (*val >= dev_priv->rps.max_delay)
		*val = dev_priv->rps.max_delay;
	limits |= dev_priv->rps.max_delay << 24;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	if (*val <= dev_priv->rps.min_delay) {
		*val = dev_priv->rps.min_delay;
		limits |= dev_priv->rps.min_delay << 16;
	}

	return limits;
}
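/*
 * Example of the encoding produced above (hypothetical delays, for
 * illustration only): with rps.max_delay = 0x14 and rps.min_delay =
 * 0x05, a request clamped to the floor yields limits =
 * (0x14 << 24) | (0x05 << 16) = 0x14050000; a request anywhere between
 * the two limits sets only the high (max) byte.
 */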
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 limits = gen6_rps_limits(dev_priv, &val);

	WARN_ON(val > dev_priv->rps.max_delay);
	WARN_ON(val < dev_priv->rps.min_delay);

	if (val == dev_priv->rps.cur_delay)
		return;

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(val) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_delay = val;
}
static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, 0);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock(&dev_priv->rps.lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock(&dev_priv->rps.lock);

	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
int intel_enable_rc6(const struct drm_device *dev)
{
	/* Respect the kernel parameter if it is set */
	if (i915_enable_rc6 >= 0)
		return i915_enable_rc6;

	/* Disable RC6 on Ironlake */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	if (IS_HASWELL(dev)) {
		DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
		return INTEL_RC6_ENABLE;
	}

	/* snb/ivb have more than one rc6 state. */
	if (INTEL_INFO(dev)->gen == 6) {
		DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
		return INTEL_RC6_ENABLE;
	}

	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	u32 rp_state_cap;
	u32 gt_perf_status;
	u32 rc6vids, pcu_mbox, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i, ret;

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);

	/* In units of 100MHz */
	dev_priv->rps.max_delay = rp_state_cap & 0xff;
	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
	dev_priv->rps.cur_delay = 0;

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
		 (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
		 (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
		 (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	I915_WRITE(GEN6_RPNSWREQ,
		   GEN6_FREQUENCY(10) |
		   GEN6_OFFSET(0) |
		   GEN6_AGGRESSIVE_TURBO);
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   GEN6_FREQUENCY(12));

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_delay << 24 |
		   dev_priv->rps.min_delay << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (!ret) {
		pcu_mbox = 0;
		ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
		if (ret && pcu_mbox & (1<<31)) { /* OC supported */
			dev_priv->rps.max_delay = pcu_mbox & 0xff;
			DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
		}
	} else {
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
	}

	gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);

	/* requires MSI enabled */
	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
	spin_lock(&dev_priv->rps.lock);
	WARN_ON(dev_priv->rps.pm_iir != 0);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock(&dev_priv->rps.lock);
	/* enable all PM interrupts */
	I915_WRITE(GEN6_PMINTRMSK, 0);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
			  GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	gen6_gt_force_wake_put(dev_priv);
}
static void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int ia_freq, max_ia_freq;
	int scaling_factor = 180;

#if 0
	max_ia_freq = cpufreq_quick_get_max(0);
	/*
	 * Default to measured freq if none found, PCU will ensure we don't go
	 * over
	 */
	if (!max_ia_freq)
		max_ia_freq = tsc_khz;
#else
	max_ia_freq = tsc_frequency / 1000;
#endif

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access.  We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
	     gpu_freq--) {
		int diff = dev_priv->rps.max_delay - gpu_freq;

		/*
		 * For GPU frequencies less than 750MHz, just use the lowest
		 * ring freq.
		 */
		if (gpu_freq < min_freq)
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
#if 0
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
#else
		ia_freq = (ia_freq + 50) / 100;
#endif
		ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq | gpu_freq);
	}
}
void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx) {
		i915_gem_object_unpin(dev_priv->ips.renderctx);
		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
		dev_priv->ips.renderctx = NULL;
	}

	if (dev_priv->ips.pwrctx) {
		i915_gem_object_unpin(dev_priv->ips.pwrctx);
		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
		dev_priv->ips.pwrctx = NULL;
	}
}
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
}
static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx == NULL)
		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.renderctx)
		return -ENOMEM;

	if (dev_priv->ips.pwrctx == NULL)
		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}
static void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	bool was_interruptible;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	ret = ironlake_setup_rc6(dev);
	if (ret)
		return;

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		dev_priv->mm.interruptible = was_interruptible;
		return;
	}

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_ring_idle(ring);
	dev_priv->mm.interruptible = was_interruptible;
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		return;
	}

	I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
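/*
 * Worked decode of a PXVFREQ value (hypothetical register contents, for
 * illustration only): vidfreq = 0x110001 gives div = 0x11 (17), post = 0
 * and pre = 1, so freq = (17 * 133333) / ((1 << 0) * 1) = 2266661, a
 * P-state frequency derived from the 133.333 MHz reference clock.
 */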
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = diff / diff1;
	ret = ((m * diff) + c);
	ret = ret / 10;

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (dev_priv->info->gen != 5)
		return 0;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	val = __i915_chipset_val(dev_priv);

	lockmgr(&mchdev_lock, LK_RELEASE);

	return val;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
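/*
 * Worked example (hypothetical register values, for illustration only):
 * with a TSFS slope of 120 and TR1 reading 100, m * x / 127 = 94; an
 * intercept b of 10 then yields i915_mch_val() = 84.
 */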
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		/* ... 0x80 voltage lookup entries elided ... */
	};

	if (dev_priv->info->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	nanotime(&now);
	diff1 = now;
	timespecsub(&diff1, &dev_priv->ips.last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = diff / (diffms * 10);
	dev_priv->ips.gfx_power = diff;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	if (dev_priv->info->gen != 5)
		return;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	__i915_update_gfx_val(dev_priv);

	lockmgr(&mchdev_lock, LK_RELEASE);
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (dev_priv->info->gen != 5)
		return 0;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	val = __i915_gfx_val(dev_priv);

	lockmgr(&mchdev_lock, LK_RELEASE);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_ring_buffer *ring;
	bool ret = false;
	int i;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_ring(ring, dev_priv, i)
		ret |= !list_empty(&ring->request_list);

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	i915_mch_dev = dev_priv;
	lockmgr(&mchdev_lock, LK_RELEASE);
}

void intel_gpu_ips_teardown(void)
{
	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	i915_mch_dev = NULL;
	lockmgr(&mchdev_lock, LK_RELEASE);
}
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
		lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE);
		gen6_disable_rps(dev);
		lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE);
	}
}
static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE);
	gen6_enable_rps(dev);
	gen6_update_ring_freq(dev);
	lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE);
}
void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
	} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 */
		schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
				      round_jiffies_up_relative(hz));
	}
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/* Required for FBC */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));
	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	ibx_init_clock_gating(dev);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(pipe) {
		val = TRANS_CHICKEN2_TIMING_OVERRIDE;
		if (dev_priv->fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaSetupGtModeTdRowDispatch */
	if (IS_SNB_GT1(dev))
		I915_WRITE(GEN6_GT_MODE,
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * Also apply WaDisableVDSUnitClockGating and
	 * WaDisableRCPBUnitClockGating.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* Bspec says we need to always set all mask bits. */
	I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
		   _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	/* WaMbcDriverBootEnable */
	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	/* The default value should be 0x200 according to docs, but the two
	 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
	I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));

	cpt_init_clock_gating(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* WaDisable4x2SubspanOptimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/* WaMbcDriverBootEnable */
	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	/* XXX: This is a workaround for early silicon revisions and should be
	 * removed later.
	 */
	I915_WRITE(WM_DBG,
			I915_READ(WM_DBG) |
			WM_DBG_DISALLOW_MULTIPLE_LP |
			WM_DBG_DISALLOW_SPRITE |
			WM_DBG_DISALLOW_MAXFIFO);

	lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t snpcr;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
	else
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* WaForceL3Serialization */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	/* WaMbcDriverBootEnable */
	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/* WaDisable4x2SubspanOptimization */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	cpt_init_clock_gating(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
	I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* WaForceL3Serialization (the source repeated this read-modify-write
	 * verbatim a second time below; one copy suffices) */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaMbcDriverBootEnable */
	I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
		   GEN6_MBCTL_ENABLE_BOOT_FETCH);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set.  Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating workaround.
	 *
	 * Also apply WaDisableVDSUnitClockGating and
	 * WaDisableRCPBUnitClockGating.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
		   GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_display_plane(dev_priv, pipe);
	}

	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * On ValleyView, the GUnit needs to signal the GT
	 * when flip and other events complete.  So enable
	 * all the GUnit->GT interrupts here
	 */
	I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
		   PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
		   SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
		   PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
		   PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
		   SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
		   PLANEA_FLIPDONE_INT_EN);

	/*
	 * WaDisableVLVClockGating_VBIIssue
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
}
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
}
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}
/* Starting with Haswell, we have different power wells for
 * different parts of the GPU. This attempts to enable them all.
 */
void intel_init_power_wells(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long power_wells[] = {
		HSW_PWR_WELL_CTL1,
		HSW_PWR_WELL_CTL2,
		HSW_PWR_WELL_CTL4
	};
	int i;

	if (!IS_HASWELL(dev))
		return;

	DRM_LOCK(dev);

	for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
		int well = I915_READ(power_wells[i]);

		if ((well & HSW_PWR_WELL_STATE) == 0) {
			I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
			if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
				DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
		}
	}

	DRM_UNLOCK(dev);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		if (IS_GEN5(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = ivybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		} else if (IS_HASWELL(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
				dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_ACK;

	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */

	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	__gen6_gt_wait_for_thread_c0(dev_priv);
}
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);

	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	__gen6_gt_wait_for_thread_c0(dev_priv);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->gt.force_wake_get(dev_priv);
	lockmgr(&dev_priv->gt_lock, LK_RELEASE);
}

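/*
 * Illustrative usage (a sketch, not a sequence taken from this driver):
 * code that must keep the GT awake across several register accesses
 * brackets them with the reference-counted helpers, e.g.
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(reg);	... GT is guaranteed awake here ...
 *	I915_WRITE(reg, val | bit);
 *	gen6_gt_force_wake_put(dev_priv);
 *
 * where "reg", "val" and "bit" stand for whatever access is being
 * protected.  Nesting is safe: only the first get and the last put reach
 * the hardware, serialized by gt_lock and counted by forcewake_count.
 */
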
void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
		 "MMIO read or write has been dropped %x\n", gtfifodbg))
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	POSTING_READ(ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	lockmgr(&dev_priv->gt_lock, LK_EXCLUSIVE);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->gt.force_wake_put(dev_priv);
	lockmgr(&dev_priv->gt_lock, LK_RELEASE);
}

/*
 * The GT buffers MMIO writes in a small FIFO while powered down; if the
 * FIFO drops below the reserved number of free entries, further writes
 * can be lost.  Spin until enough entries are free before letting the
 * next write through.
 */
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		if (loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES) {
			kprintf("%s loop\n", __func__);
			++ret;
		}
		dev_priv->gt_fifo_count = fifo;
	}
	dev_priv->gt_fifo_count--;

	return ret;
}

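/*
 * Valleyview forcewake: the same request/ack protocol as above, but
 * through the dedicated FORCEWAKE_VLV / FORCEWAKE_ACK_VLV register pair.
 */
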
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	POSTING_READ(FORCEWAKE_ACK_VLV);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	POSTING_READ(FORCEWAKE_ACK_VLV);
	gen6_gt_check_fifodbg(dev_priv);
}

void intel_gt_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}
}

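/*
 * intel_gt_init() selects the forcewake implementation for the running
 * platform: the VLV register pair on Valleyview, the multithreaded (MT)
 * variant on Ivybridge/Haswell, and the legacy single-bit variant on
 * Sandybridge (Gen6).
 */
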
void intel_gt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockinit(&dev_priv->gt_lock, "915gt", 0, LK_CANRECURSE);

	intel_gt_reset(dev);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->gt.force_wake_get = vlv_force_wake_get;
		dev_priv->gt.force_wake_put = vlv_force_wake_put;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_GEN6(dev)) {
		dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
		dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
	}

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);
}

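/*
 * Sandybridge PCODE mailbox: the caller places the payload in
 * GEN6_PCODE_DATA, then writes GEN6_PCODE_READY | mbox to
 * GEN6_PCODE_MAILBOX to issue the command.  Firmware clears the READY
 * bit when it has finished, at which point a read command's result can
 * be fetched from GEN6_PCODE_DATA.  A mailbox still marked READY on
 * entry means an earlier command is in flight, so the access is refused.
 */
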
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);