/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */
#include <sys/limits.h>

#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>

#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>

#include <linux/err.h>
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
#define INTEL_P2_NUM	2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	/**
	 * find_pll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_pll)(const intel_limit_t *limit,
			 struct drm_crtc *crtc,
			 int target, int refclk,
			 intel_clock_t *match_clock,
			 intel_clock_t *best_clock);
};
#define IRONLAKE_FDI_FREQ	2700000 /* in kHz for mode->clock */

int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
}
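/*
 * Illustrative arithmetic only (the value 25 below is a made-up readback,
 * not taken from this file): the masked register field holds the link
 * frequency minus two, in units of 100 MHz, so a readback of 25 gives
 * 25 + 2 = 27, i.e. 2.7 GHz, which corresponds to the 2700000 kHz
 * IRONLAKE_FDI_FREQ value defined above.
 */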
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t intel_limits_vlv_dac = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 22, .max = 450 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

static const intel_limit_t intel_limits_vlv_hdmi = {
	.dot = { .min = 20000, .max = 165000 },
	.vco = { .min = 4000000, .max = 5994000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 60, .max = 300 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

static const intel_limit_t intel_limits_vlv_dp = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 22, .max = 450 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		return 0;
	}

	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO read wait timed out\n");
		return 0;
	}

	return I915_READ(DPIO_DATA);
}

static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
			     u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));

	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		return;
	}

	I915_WRITE(DPIO_DATA, val);
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
		DRM_ERROR("DPIO write wait timed out\n");
}
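/*
 * Illustrative read-modify-write sketch only (not a sequence lifted from
 * this file): callers hold dpio_lock and go through the packet interface
 * above, e.g.
 *
 *	mutex_lock(&dev_priv->dpio_lock);
 *	val = intel_dpio_read(dev_priv, reg);
 *	intel_dpio_write(dev_priv, reg, val | some_bit);
 *	mutex_unlock(&dev_priv->dpio_lock);
 *
 * where "some_bit" is a placeholder; real callers set register-specific
 * fields.
 */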
static void vlv_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Reset the DPIO config */
	I915_WRITE(DPIO_CTL, 0);
	POSTING_READ(DPIO_CTL);
	I915_WRITE(DPIO_CTL, 1);
	POSTING_READ(DPIO_CTL);
}
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_VALLEYVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
			limit = &intel_limits_vlv_dac;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
			limit = &intel_limits_vlv_hdmi;
		else
			limit = &intel_limits_vlv_dp;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}
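/*
 * Typical use (an illustrative sketch only, not a call path taken verbatim
 * from this file): mode-set code picks the limit table for the CRTC and
 * then asks its find_pll() hook for divisors, e.g.
 *
 *	const intel_limit_t *limit = intel_limit(crtc, refclk);
 *	intel_clock_t clock;
 *	bool ok = limit->find_pll(limit, crtc, adjusted_mode->clock,
 *				  refclk, NULL, &clock);
 *
 * Here "adjusted_mode" is a placeholder for the caller's target mode, and a
 * NULL match_clock means no LVDS downclock P-divider matching is requested.
 */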
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
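/*
 * Worked example with illustrative (not hardware-derived) values, i9xx-style
 * and non-Pineview: with refclk = 96000 kHz and register-style values
 * m1 = 12, m2 = 6, n = 2, p1 = 2, p2 = 10, the formulas above give
 * m = 5 * (12 + 2) + (6 + 2) = 78, p = 2 * 10 = 20,
 * vco = 96000 * 78 / (2 + 2) = 1872000 kHz and dot = 1872000 / 20 =
 * 93600 kHz, which falls inside the intel_limits_i9xx_sdvo ranges above.
 */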
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}
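/*
 * Note on the return convention above: err is initialised to the target
 * value itself, so "err != target" is only false when no candidate at all
 * improved on that initial error, i.e. the brute-force search found nothing
 * valid. As an illustrative example, a valid candidate whose dot clock
 * misses a 96000 kHz target by 50 kHz sets err = 50 and makes the function
 * return true.
 */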
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}

	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;

	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
	u32 m, n, fastclk;
	u32 updrate, minupdate, fracbits, p;
	unsigned long bestppm, ppm, absppm;
	int dotclk, flag;

	flag = 0;
	dotclk = target * 1000;
	bestppm = 1000000;
	ppm = absppm = 0;
	fastclk = dotclk / (2*100);
	updrate = 0;
	minupdate = 19200;
	fracbits = 1;
	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
	bestm1 = bestm2 = bestp1 = bestp2 = 0;

	/* based on hardware requirement, prefer smaller n to precision */
	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
		updrate = refclk / n;
		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
				if (p2 > 10)
					p2 = p2 - 10;
				p = p1 * p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
					m2 = (((2*(fastclk * p * n / m1)) +
					       refclk) / (2*refclk));
					m = m1 * m2;
					vco = updrate * m;
					if (vco >= limit->vco.min && vco < limit->vco.max) {
						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
						absppm = (ppm > 0) ? ppm : (-ppm);
						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
							bestppm = 0;
							flag = 1;
						}
						if (absppm < bestppm - 10) {
							bestppm = absppm;
							flag = 1;
						}
						if (flag) {
							bestn = n;
							bestm1 = m1;
							bestm2 = m2;
							bestp1 = p1;
							bestp2 = p2;
							flag = 0;
						}
					}
				}
			}
		}
	}
	best_clock->n = bestn;
	best_clock->m1 = bestm1;
	best_clock->m2 = bestm2;
	best_clock->p1 = bestp1;
	best_clock->p2 = bestp2;

	return true;
}
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum i915_pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config.cpu_transcoder;
}
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPEFRAME(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * our driver.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	if (INTEL_INFO(dev)->gen >= 5) {
		ironlake_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			mdelay(5);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			WARN(1, "pipe_off wait timed out\n");
	}
}
/**
 * ibx_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Returns true if @port is connected, false otherwise.
 */
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port)
{
	u32 bit;

	if (HAS_PCH_IBX(dev_priv->dev)) {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG;
			break;
		default:
			return true;
		}
	} else {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG_CPT;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG_CPT;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG_CPT;
			break;
		default:
			return true;
		}
	}

	return I915_READ(SDEISR) & bit;
}
static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}
/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
1067 static void assert_pch_pll(struct drm_i915_private
*dev_priv
,
1068 struct intel_pch_pll
*pll
,
1069 struct intel_crtc
*crtc
,
1075 if (HAS_PCH_LPT(dev_priv
->dev
)) {
1076 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1081 "asserting PCH PLL %s with no PLL\n", state_string(state
)))
1084 val
= I915_READ(pll
->pll_reg
);
1085 cur_state
= !!(val
& DPLL_VCO_ENABLE
);
1086 WARN(cur_state
!= state
,
1087 "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
1088 pll
->pll_reg
, state_string(state
), state_string(cur_state
), val
);
1090 /* Make sure the selected PLL is correctly attached to the transcoder */
1091 if (crtc
&& HAS_PCH_CPT(dev_priv
->dev
)) {
1094 pch_dpll
= I915_READ(PCH_DPLL_SEL
);
1095 cur_state
= pll
->pll_reg
== _PCH_DPLL_B
;
1096 if (!WARN(((pch_dpll
>> (4 * crtc
->pipe
)) & 1) != cur_state
,
1097 "PLL[%d] not attached to this transcoder %d: %08x\n",
1098 cur_state
, crtc
->pipe
, pch_dpll
)) {
1099 cur_state
= !!(val
>> (4*crtc
->pipe
+ 3));
1100 WARN(cur_state
!= state
,
1101 "PLL[%d] not %s on this transcoder %d: %08x\n",
1102 pll
->pll_reg
== _PCH_DPLL_B
,
1103 state_string(state
),
1109 #define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
1110 #define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
1112 static void assert_fdi_tx(struct drm_i915_private
*dev_priv
,
1113 enum i915_pipe pipe
, bool state
)
1118 enum transcoder cpu_transcoder
= intel_pipe_to_cpu_transcoder(dev_priv
,
1121 if (HAS_DDI(dev_priv
->dev
)) {
1122 /* DDI does not have a specific FDI_TX register */
1123 reg
= TRANS_DDI_FUNC_CTL(cpu_transcoder
);
1124 val
= I915_READ(reg
);
1125 cur_state
= !!(val
& TRANS_DDI_FUNC_ENABLE
);
1127 reg
= FDI_TX_CTL(pipe
);
1128 val
= I915_READ(reg
);
1129 cur_state
= !!(val
& FDI_TX_ENABLE
);
1131 WARN(cur_state
!= state
,
1132 "FDI TX state assertion failure (expected %s, current %s)\n",
1133 state_string(state
), state_string(cur_state
));
1135 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1136 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1138 static void assert_fdi_rx(struct drm_i915_private
*dev_priv
,
1139 enum i915_pipe pipe
, bool state
)
1145 reg
= FDI_RX_CTL(pipe
);
1146 val
= I915_READ(reg
);
1147 cur_state
= !!(val
& FDI_RX_ENABLE
);
1148 WARN(cur_state
!= state
,
1149 "FDI RX state assertion failure (expected %s, current %s)\n",
1150 state_string(state
), state_string(cur_state
));
1152 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1153 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1155 static void assert_fdi_tx_pll_enabled(struct drm_i915_private
*dev_priv
,
1156 enum i915_pipe pipe
)
1161 /* ILK FDI PLL is always enabled */
1162 if (dev_priv
->info
->gen
== 5)
1165 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1166 if (HAS_DDI(dev_priv
->dev
))
1169 reg
= FDI_TX_CTL(pipe
);
1170 val
= I915_READ(reg
);
1171 WARN(!(val
& FDI_TX_PLL_ENABLE
), "FDI TX PLL assertion failure, should be active but is disabled\n");
1174 static void assert_fdi_rx_pll_enabled(struct drm_i915_private
*dev_priv
,
1175 enum i915_pipe pipe
)
1180 reg
= FDI_RX_CTL(pipe
);
1181 val
= I915_READ(reg
);
1182 WARN(!(val
& FDI_RX_PLL_ENABLE
), "FDI RX PLL assertion failure, should be active but is disabled\n");
1185 static void assert_panel_unlocked(struct drm_i915_private
*dev_priv
,
1186 enum i915_pipe pipe
)
1188 int pp_reg
, lvds_reg
;
1190 enum i915_pipe panel_pipe
= PIPE_A
;
1193 if (HAS_PCH_SPLIT(dev_priv
->dev
)) {
1194 pp_reg
= PCH_PP_CONTROL
;
1195 lvds_reg
= PCH_LVDS
;
1197 pp_reg
= PP_CONTROL
;
1201 val
= I915_READ(pp_reg
);
1202 if (!(val
& PANEL_POWER_ON
) ||
1203 ((val
& PANEL_UNLOCK_REGS
) == PANEL_UNLOCK_REGS
))
1206 if (I915_READ(lvds_reg
) & LVDS_PIPEB_SELECT
)
1207 panel_pipe
= PIPE_B
;
1209 WARN(panel_pipe
== pipe
&& locked
,
1210 "panel assertion failure, pipe %c regs locked\n",
1214 void assert_pipe(struct drm_i915_private
*dev_priv
,
1215 enum i915_pipe pipe
, bool state
)
1220 enum transcoder cpu_transcoder
= intel_pipe_to_cpu_transcoder(dev_priv
,
1223 /* if we need the pipe A quirk it must be always on */
1224 if (pipe
== PIPE_A
&& dev_priv
->quirks
& QUIRK_PIPEA_FORCE
)
1227 if (!intel_using_power_well(dev_priv
->dev
) &&
1228 cpu_transcoder
!= TRANSCODER_EDP
) {
1231 reg
= PIPECONF(cpu_transcoder
);
1232 val
= I915_READ(reg
);
1233 cur_state
= !!(val
& PIPECONF_ENABLE
);
1236 WARN(cur_state
!= state
,
1237 "pipe %c assertion failure (expected %s, current %s)\n",
1238 pipe_name(pipe
), state_string(state
), state_string(cur_state
));
1241 static void assert_plane(struct drm_i915_private
*dev_priv
,
1242 enum plane plane
, bool state
)
1248 reg
= DSPCNTR(plane
);
1249 val
= I915_READ(reg
);
1250 cur_state
= !!(val
& DISPLAY_PLANE_ENABLE
);
1251 WARN(cur_state
!= state
,
1252 "plane %c assertion failure (expected %s, current %s)\n",
1253 plane_name(plane
), state_string(state
), state_string(cur_state
));
1256 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1257 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1259 static void assert_planes_disabled(struct drm_i915_private
*dev_priv
,
1260 enum i915_pipe pipe
)
1266 /* Planes are fixed to pipes on ILK+ */
1267 if (HAS_PCH_SPLIT(dev_priv
->dev
) || IS_VALLEYVIEW(dev_priv
->dev
)) {
1268 reg
= DSPCNTR(pipe
);
1269 val
= I915_READ(reg
);
1270 WARN((val
& DISPLAY_PLANE_ENABLE
),
1271 "plane %c assertion failure, should be disabled but not\n",
1276 /* Need to check both planes against the pipe */
1277 for (i
= 0; i
< 2; i
++) {
1279 val
= I915_READ(reg
);
1280 cur_pipe
= (val
& DISPPLANE_SEL_PIPE_MASK
) >>
1281 DISPPLANE_SEL_PIPE_SHIFT
;
1282 WARN((val
& DISPLAY_PLANE_ENABLE
) && pipe
== cur_pipe
,
1283 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1284 plane_name(i
), pipe_name(pipe
));
1288 static void assert_sprites_disabled(struct drm_i915_private
*dev_priv
,
1289 enum i915_pipe pipe
)
1294 if (!IS_VALLEYVIEW(dev_priv
->dev
))
1297 /* Need to check both planes against the pipe */
1298 for (i
= 0; i
< dev_priv
->num_plane
; i
++) {
1299 reg
= SPCNTR(pipe
, i
);
1300 val
= I915_READ(reg
);
1301 WARN((val
& SP_ENABLE
),
1302 "sprite %d assertion failure, should be off on pipe %c but is still active\n",
1303 pipe
* 2 + i
, pipe_name(pipe
));
1307 static void assert_pch_refclk_enabled(struct drm_i915_private
*dev_priv
)
1312 if (HAS_PCH_LPT(dev_priv
->dev
)) {
1313 DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
1317 val
= I915_READ(PCH_DREF_CONTROL
);
1318 enabled
= !!(val
& (DREF_SSC_SOURCE_MASK
| DREF_NONSPREAD_SOURCE_MASK
|
1319 DREF_SUPERSPREAD_SOURCE_MASK
));
1320 WARN(!enabled
, "PCH refclk assertion failure, should be active but is disabled\n");
1323 static void assert_transcoder_disabled(struct drm_i915_private
*dev_priv
,
1324 enum i915_pipe pipe
)
1330 reg
= TRANSCONF(pipe
);
1331 val
= I915_READ(reg
);
1332 enabled
= !!(val
& TRANS_ENABLE
);
1334 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1338 static bool dp_pipe_enabled(struct drm_i915_private
*dev_priv
,
1339 enum i915_pipe pipe
, u32 port_sel
, u32 val
)
1341 if ((val
& DP_PORT_EN
) == 0)
1344 if (HAS_PCH_CPT(dev_priv
->dev
)) {
1345 u32 trans_dp_ctl_reg
= TRANS_DP_CTL(pipe
);
1346 u32 trans_dp_ctl
= I915_READ(trans_dp_ctl_reg
);
1347 if ((trans_dp_ctl
& TRANS_DP_PORT_SEL_MASK
) != port_sel
)
1350 if ((val
& DP_PIPE_MASK
) != (pipe
<< 30))
1356 static bool hdmi_pipe_enabled(struct drm_i915_private
*dev_priv
,
1357 enum i915_pipe pipe
, u32 val
)
1359 if ((val
& SDVO_ENABLE
) == 0)
1362 if (HAS_PCH_CPT(dev_priv
->dev
)) {
1363 if ((val
& SDVO_PIPE_SEL_MASK_CPT
) != SDVO_PIPE_SEL_CPT(pipe
))
1366 if ((val
& SDVO_PIPE_SEL_MASK
) != SDVO_PIPE_SEL(pipe
))
1372 static bool lvds_pipe_enabled(struct drm_i915_private
*dev_priv
,
1373 enum i915_pipe pipe
, u32 val
)
1375 if ((val
& LVDS_PORT_EN
) == 0)
1378 if (HAS_PCH_CPT(dev_priv
->dev
)) {
1379 if ((val
& PORT_TRANS_SEL_MASK
) != PORT_TRANS_SEL_CPT(pipe
))
1382 if ((val
& LVDS_PIPE_MASK
) != LVDS_PIPE(pipe
))
1388 static bool adpa_pipe_enabled(struct drm_i915_private
*dev_priv
,
1389 enum i915_pipe pipe
, u32 val
)
1391 if ((val
& ADPA_DAC_ENABLE
) == 0)
1393 if (HAS_PCH_CPT(dev_priv
->dev
)) {
1394 if ((val
& PORT_TRANS_SEL_MASK
) != PORT_TRANS_SEL_CPT(pipe
))
1397 if ((val
& ADPA_PIPE_SELECT_MASK
) != ADPA_PIPE_SELECT(pipe
))
1403 static void assert_pch_dp_disabled(struct drm_i915_private
*dev_priv
,
1404 enum i915_pipe pipe
, int reg
, u32 port_sel
)
1406 u32 val
= I915_READ(reg
);
1407 WARN(dp_pipe_enabled(dev_priv
, pipe
, port_sel
, val
),
1408 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1409 reg
, pipe_name(pipe
));
1411 WARN(HAS_PCH_IBX(dev_priv
->dev
) && (val
& DP_PORT_EN
) == 0
1412 && (val
& DP_PIPEB_SELECT
),
1413 "IBX PCH dp port still using transcoder B\n");
1416 static void assert_pch_hdmi_disabled(struct drm_i915_private
*dev_priv
,
1417 enum i915_pipe pipe
, int reg
)
1419 u32 val
= I915_READ(reg
);
1420 WARN(hdmi_pipe_enabled(dev_priv
, pipe
, val
),
1421 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1422 reg
, pipe_name(pipe
));
1424 WARN(HAS_PCH_IBX(dev_priv
->dev
) && (val
& SDVO_ENABLE
) == 0
1425 && (val
& SDVO_PIPE_B_SELECT
),
1426 "IBX PCH hdmi port still using transcoder B\n");
1429 static void assert_pch_ports_disabled(struct drm_i915_private
*dev_priv
,
1430 enum i915_pipe pipe
)
1435 assert_pch_dp_disabled(dev_priv
, pipe
, PCH_DP_B
, TRANS_DP_PORT_SEL_B
);
1436 assert_pch_dp_disabled(dev_priv
, pipe
, PCH_DP_C
, TRANS_DP_PORT_SEL_C
);
1437 assert_pch_dp_disabled(dev_priv
, pipe
, PCH_DP_D
, TRANS_DP_PORT_SEL_D
);
1440 val
= I915_READ(reg
);
1441 WARN(adpa_pipe_enabled(dev_priv
, pipe
, val
),
1442 "PCH VGA enabled on transcoder %c, should be disabled\n",
1446 val
= I915_READ(reg
);
1447 WARN(lvds_pipe_enabled(dev_priv
, pipe
, val
),
1448 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1451 assert_pch_hdmi_disabled(dev_priv
, pipe
, PCH_HDMIB
);
1452 assert_pch_hdmi_disabled(dev_priv
, pipe
, PCH_HDMIC
);
1453 assert_pch_hdmi_disabled(dev_priv
, pipe
, PCH_HDMID
);
1457 * intel_enable_pll - enable a PLL
1458 * @dev_priv: i915 private structure
1459 * @pipe: pipe PLL to enable
1461 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
1462 * make sure the PLL reg is writable first though, since the panel write
1463 * protect mechanism may be enabled.
1465 * Note! This is for pre-ILK only.
1467 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
1469 static void intel_enable_pll(struct drm_i915_private
*dev_priv
, enum i915_pipe pipe
)
1474 /* No really, not for ILK+ */
1475 BUG_ON(!IS_VALLEYVIEW(dev_priv
->dev
) && dev_priv
->info
->gen
>= 5);
1477 /* PLL is protected by panel, make sure we can write it */
1478 if (IS_MOBILE(dev_priv
->dev
) && !IS_I830(dev_priv
->dev
))
1479 assert_panel_unlocked(dev_priv
, pipe
);
1482 val
= I915_READ(reg
);
1483 val
|= DPLL_VCO_ENABLE
;
1485 /* We do this three times for luck */
1486 I915_WRITE(reg
, val
);
1488 udelay(150); /* wait for warmup */
1489 I915_WRITE(reg
, val
);
1491 udelay(150); /* wait for warmup */
1492 I915_WRITE(reg
, val
);
1494 udelay(150); /* wait for warmup */
1498 * intel_disable_pll - disable a PLL
1499 * @dev_priv: i915 private structure
1500 * @pipe: pipe PLL to disable
1502 * Disable the PLL for @pipe, making sure the pipe is off first.
1504 * Note! This is for pre-ILK only.
1506 static void intel_disable_pll(struct drm_i915_private
*dev_priv
, enum i915_pipe pipe
)
1511 /* Don't disable pipe A or pipe A PLLs if needed */
1512 if (pipe
== PIPE_A
&& (dev_priv
->quirks
& QUIRK_PIPEA_FORCE
))
1515 /* Make sure the pipe isn't still relying on us */
1516 assert_pipe_disabled(dev_priv
, pipe
);
1519 val
= I915_READ(reg
);
1520 val
&= ~DPLL_VCO_ENABLE
;
1521 I915_WRITE(reg
, val
);
1527 intel_sbi_write(struct drm_i915_private
*dev_priv
, u16 reg
, u32 value
,
1528 enum intel_sbi_destination destination
)
1532 WARN_ON(!mutex_is_locked(&dev_priv
->dpio_lock
));
1534 if (wait_for((I915_READ(SBI_CTL_STAT
) & SBI_BUSY
) == 0, 100)) {
1535 DRM_ERROR("timeout waiting for SBI to become ready\n");
1539 I915_WRITE(SBI_ADDR
, (reg
<< 16));
1540 I915_WRITE(SBI_DATA
, value
);
1542 if (destination
== SBI_ICLK
)
1543 tmp
= SBI_CTL_DEST_ICLK
| SBI_CTL_OP_CRWR
;
1545 tmp
= SBI_CTL_DEST_MPHY
| SBI_CTL_OP_IOWR
;
1546 I915_WRITE(SBI_CTL_STAT
, SBI_BUSY
| tmp
);
1548 if (wait_for((I915_READ(SBI_CTL_STAT
) & (SBI_BUSY
| SBI_RESPONSE_FAIL
)) == 0,
1550 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1556 intel_sbi_read(struct drm_i915_private
*dev_priv
, u16 reg
,
1557 enum intel_sbi_destination destination
)
1561 WARN_ON(!mutex_is_locked(&dev_priv
->dpio_lock
));
1563 if (wait_for((I915_READ(SBI_CTL_STAT
) & SBI_BUSY
) == 0, 100)) {
1564 DRM_ERROR("timeout waiting for SBI to become ready\n");
1568 I915_WRITE(SBI_ADDR
, (reg
<< 16));
1570 if (destination
== SBI_ICLK
)
1571 value
= SBI_CTL_DEST_ICLK
| SBI_CTL_OP_CRRD
;
1573 value
= SBI_CTL_DEST_MPHY
| SBI_CTL_OP_IORD
;
1574 I915_WRITE(SBI_CTL_STAT
, value
| SBI_BUSY
);
1576 if (wait_for((I915_READ(SBI_CTL_STAT
) & (SBI_BUSY
| SBI_RESPONSE_FAIL
)) == 0,
1578 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1582 return I915_READ(SBI_DATA
);
1586 * ironlake_enable_pch_pll - enable PCH PLL
1587 * @dev_priv: i915 private structure
1588 * @pipe: pipe PLL to enable
1590 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1591 * drives the transcoder clock.
1593 static void ironlake_enable_pch_pll(struct intel_crtc
*intel_crtc
)
1595 struct drm_i915_private
*dev_priv
= intel_crtc
->base
.dev
->dev_private
;
1596 struct intel_pch_pll
*pll
;
1600 /* PCH PLLs only available on ILK, SNB and IVB */
1601 BUG_ON(dev_priv
->info
->gen
< 5);
1602 pll
= intel_crtc
->pch_pll
;
1606 if (WARN_ON(pll
->refcount
== 0))
1609 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
1610 pll
->pll_reg
, pll
->active
, pll
->on
,
1611 intel_crtc
->base
.base
.id
);
1613 /* PCH refclock must be enabled first */
1614 assert_pch_refclk_enabled(dev_priv
);
1616 if (pll
->active
++ && pll
->on
) {
1617 assert_pch_pll_enabled(dev_priv
, pll
, NULL
);
1621 DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll
->pll_reg
);
1624 val
= I915_READ(reg
);
1625 val
|= DPLL_VCO_ENABLE
;
1626 I915_WRITE(reg
, val
);
1633 static void intel_disable_pch_pll(struct intel_crtc
*intel_crtc
)
1635 struct drm_i915_private
*dev_priv
= intel_crtc
->base
.dev
->dev_private
;
1636 struct intel_pch_pll
*pll
= intel_crtc
->pch_pll
;
1640 /* PCH only available on ILK+ */
1641 BUG_ON(dev_priv
->info
->gen
< 5);
1645 if (WARN_ON(pll
->refcount
== 0))
1648 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1649 pll
->pll_reg
, pll
->active
, pll
->on
,
1650 intel_crtc
->base
.base
.id
);
1652 if (WARN_ON(pll
->active
== 0)) {
1653 assert_pch_pll_disabled(dev_priv
, pll
, NULL
);
1657 if (--pll
->active
) {
1658 assert_pch_pll_enabled(dev_priv
, pll
, NULL
);
1662 DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll
->pll_reg
);
1664 /* Make sure transcoder isn't still depending on us */
1665 assert_transcoder_disabled(dev_priv
, intel_crtc
->pipe
);
1668 val
= I915_READ(reg
);
1669 val
&= ~DPLL_VCO_ENABLE
;
1670 I915_WRITE(reg
, val
);
1677 static void ironlake_enable_pch_transcoder(struct drm_i915_private
*dev_priv
,
1678 enum i915_pipe pipe
)
1680 struct drm_device
*dev
= dev_priv
->dev
;
1681 struct drm_crtc
*crtc
= dev_priv
->pipe_to_crtc_mapping
[pipe
];
1682 uint32_t reg
, val
, pipeconf_val
;
1684 /* PCH only available on ILK+ */
1685 BUG_ON(dev_priv
->info
->gen
< 5);
1687 /* Make sure PCH DPLL is enabled */
1688 assert_pch_pll_enabled(dev_priv
,
1689 to_intel_crtc(crtc
)->pch_pll
,
1690 to_intel_crtc(crtc
));
1692 /* FDI must be feeding us bits for PCH ports */
1693 assert_fdi_tx_enabled(dev_priv
, pipe
);
1694 assert_fdi_rx_enabled(dev_priv
, pipe
);
1696 if (HAS_PCH_CPT(dev
)) {
1697 /* Workaround: Set the timing override bit before enabling the
1698 * pch transcoder. */
1699 reg
= TRANS_CHICKEN2(pipe
);
1700 val
= I915_READ(reg
);
1701 val
|= TRANS_CHICKEN2_TIMING_OVERRIDE
;
1702 I915_WRITE(reg
, val
);
1705 reg
= TRANSCONF(pipe
);
1706 val
= I915_READ(reg
);
1707 pipeconf_val
= I915_READ(PIPECONF(pipe
));
1709 if (HAS_PCH_IBX(dev_priv
->dev
)) {
1711 * make the BPC in transcoder be consistent with
1712 * that in pipeconf reg.
1714 val
&= ~PIPECONF_BPC_MASK
;
1715 val
|= pipeconf_val
& PIPECONF_BPC_MASK
;
1718 val
&= ~TRANS_INTERLACE_MASK
;
1719 if ((pipeconf_val
& PIPECONF_INTERLACE_MASK
) == PIPECONF_INTERLACED_ILK
)
1720 if (HAS_PCH_IBX(dev_priv
->dev
) &&
1721 intel_pipe_has_type(crtc
, INTEL_OUTPUT_SDVO
))
1722 val
|= TRANS_LEGACY_INTERLACED_ILK
;
1724 val
|= TRANS_INTERLACED
;
1726 val
|= TRANS_PROGRESSIVE
;
1728 I915_WRITE(reg
, val
| TRANS_ENABLE
);
1729 if (wait_for(I915_READ(reg
) & TRANS_STATE_ENABLE
, 100))
1730 DRM_ERROR("failed to enable transcoder %d\n", pipe
);
1733 static void lpt_enable_pch_transcoder(struct drm_i915_private
*dev_priv
,
1734 enum transcoder cpu_transcoder
)
1736 u32 val
, pipeconf_val
;
1738 /* PCH only available on ILK+ */
1739 BUG_ON(dev_priv
->info
->gen
< 5);
1741 /* FDI must be feeding us bits for PCH ports */
1742 assert_fdi_tx_enabled(dev_priv
, (enum i915_pipe
) cpu_transcoder
);
1743 assert_fdi_rx_enabled(dev_priv
, TRANSCODER_A
);
1745 /* Workaround: set timing override bit. */
1746 val
= I915_READ(_TRANSA_CHICKEN2
);
1747 val
|= TRANS_CHICKEN2_TIMING_OVERRIDE
;
1748 I915_WRITE(_TRANSA_CHICKEN2
, val
);
1751 pipeconf_val
= I915_READ(PIPECONF(cpu_transcoder
));
1753 if ((pipeconf_val
& PIPECONF_INTERLACE_MASK_HSW
) ==
1754 PIPECONF_INTERLACED_ILK
)
1755 val
|= TRANS_INTERLACED
;
1757 val
|= TRANS_PROGRESSIVE
;
1759 I915_WRITE(TRANSCONF(TRANSCODER_A
), val
);
1760 if (wait_for(I915_READ(_TRANSACONF
) & TRANS_STATE_ENABLE
, 100))
1761 DRM_ERROR("Failed to enable PCH transcoder\n");
1764 static void ironlake_disable_pch_transcoder(struct drm_i915_private
*dev_priv
,
1765 enum i915_pipe pipe
)
1767 struct drm_device
*dev
= dev_priv
->dev
;
1770 /* FDI relies on the transcoder */
1771 assert_fdi_tx_disabled(dev_priv
, pipe
);
1772 assert_fdi_rx_disabled(dev_priv
, pipe
);
1774 /* Ports must be off as well */
1775 assert_pch_ports_disabled(dev_priv
, pipe
);
1777 reg
= TRANSCONF(pipe
);
1778 val
= I915_READ(reg
);
1779 val
&= ~TRANS_ENABLE
;
1780 I915_WRITE(reg
, val
);
1781 /* wait for PCH transcoder off, transcoder state */
1782 if (wait_for((I915_READ(reg
) & TRANS_STATE_ENABLE
) == 0, 50))
1783 DRM_ERROR("failed to disable transcoder %d\n", pipe
);
1785 if (!HAS_PCH_IBX(dev
)) {
1786 /* Workaround: Clear the timing override chicken bit again. */
1787 reg
= TRANS_CHICKEN2(pipe
);
1788 val
= I915_READ(reg
);
1789 val
&= ~TRANS_CHICKEN2_TIMING_OVERRIDE
;
1790 I915_WRITE(reg
, val
);
1794 static void lpt_disable_pch_transcoder(struct drm_i915_private
*dev_priv
)
1798 val
= I915_READ(_TRANSACONF
);
1799 val
&= ~TRANS_ENABLE
;
1800 I915_WRITE(_TRANSACONF
, val
);
1801 /* wait for PCH transcoder off, transcoder state */
1802 if (wait_for((I915_READ(_TRANSACONF
) & TRANS_STATE_ENABLE
) == 0, 50))
1803 DRM_ERROR("Failed to disable PCH transcoder\n");
1805 /* Workaround: clear timing override bit. */
1806 val
= I915_READ(_TRANSA_CHICKEN2
);
1807 val
&= ~TRANS_CHICKEN2_TIMING_OVERRIDE
;
1808 I915_WRITE(_TRANSA_CHICKEN2
, val
);
1812 * intel_enable_pipe - enable a pipe, asserting requirements
1813 * @dev_priv: i915 private structure
1814 * @pipe: pipe to enable
1815 * @pch_port: on ILK+, is this pipe driving a PCH port or not
1817 * Enable @pipe, making sure that various hardware specific requirements
1818 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1820 * @pipe should be %PIPE_A or %PIPE_B.
1822 * Will wait until the pipe is actually running (i.e. first vblank) before
1825 static void intel_enable_pipe(struct drm_i915_private
*dev_priv
, enum i915_pipe pipe
,
1828 enum transcoder cpu_transcoder
= intel_pipe_to_cpu_transcoder(dev_priv
,
1830 enum i915_pipe pch_transcoder
;
1834 if (HAS_PCH_LPT(dev_priv
->dev
))
1835 pch_transcoder
= TRANSCODER_A
;
1837 pch_transcoder
= pipe
;
1840 * A pipe without a PLL won't actually be able to drive bits from
1841 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1844 if (!HAS_PCH_SPLIT(dev_priv
->dev
))
1845 assert_pll_enabled(dev_priv
, pipe
);
1848 /* if driving the PCH, we need FDI enabled */
1849 assert_fdi_rx_pll_enabled(dev_priv
, pch_transcoder
);
1850 assert_fdi_tx_pll_enabled(dev_priv
,
1851 (enum i915_pipe
) cpu_transcoder
);
1853 /* FIXME: assert CPU port conditions for SNB+ */
1856 reg
= PIPECONF(cpu_transcoder
);
1857 val
= I915_READ(reg
);
1858 if (val
& PIPECONF_ENABLE
)
1861 I915_WRITE(reg
, val
| PIPECONF_ENABLE
);
1862 intel_wait_for_vblank(dev_priv
->dev
, pipe
);
1866 * intel_disable_pipe - disable a pipe, asserting requirements
1867 * @dev_priv: i915 private structure
1868 * @pipe: pipe to disable
1870 * Disable @pipe, making sure that various hardware specific requirements
1871 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1873 * @pipe should be %PIPE_A or %PIPE_B.
1875 * Will wait until the pipe has shut down before returning.
1877 static void intel_disable_pipe(struct drm_i915_private
*dev_priv
,
1878 enum i915_pipe pipe
)
1880 enum transcoder cpu_transcoder
= intel_pipe_to_cpu_transcoder(dev_priv
,
1886 * Make sure planes won't keep trying to pump pixels to us,
1887 * or we might hang the display.
1889 assert_planes_disabled(dev_priv
, pipe
);
1890 assert_sprites_disabled(dev_priv
, pipe
);
1892 /* Don't disable pipe A or pipe A PLLs if needed */
1893 if (pipe
== PIPE_A
&& (dev_priv
->quirks
& QUIRK_PIPEA_FORCE
))
1896 reg
= PIPECONF(cpu_transcoder
);
1897 val
= I915_READ(reg
);
1898 if ((val
& PIPECONF_ENABLE
) == 0)
1901 I915_WRITE(reg
, val
& ~PIPECONF_ENABLE
);
1902 intel_wait_for_pipe_off(dev_priv
->dev
, pipe
);
1906 * Plane regs are double buffered, going from enabled->disabled needs a
1907 * trigger in order to latch. The display address reg provides this.
1909 void intel_flush_display_plane(struct drm_i915_private
*dev_priv
,
1912 if (dev_priv
->info
->gen
>= 4)
1913 I915_WRITE(DSPSURF(plane
), I915_READ(DSPSURF(plane
)));
1915 I915_WRITE(DSPADDR(plane
), I915_READ(DSPADDR(plane
)));
1919 * intel_enable_plane - enable a display plane on a given pipe
1920 * @dev_priv: i915 private structure
1921 * @plane: plane to enable
1922 * @pipe: pipe being fed
1924 * Enable @plane on @pipe, making sure that @pipe is running first.
1926 static void intel_enable_plane(struct drm_i915_private
*dev_priv
,
1927 enum plane plane
, enum i915_pipe pipe
)
1932 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1933 assert_pipe_enabled(dev_priv
, pipe
);
1935 reg
= DSPCNTR(plane
);
1936 val
= I915_READ(reg
);
1937 if (val
& DISPLAY_PLANE_ENABLE
)
1940 I915_WRITE(reg
, val
| DISPLAY_PLANE_ENABLE
);
1941 intel_flush_display_plane(dev_priv
, plane
);
1942 intel_wait_for_vblank(dev_priv
->dev
, pipe
);
1946 * intel_disable_plane - disable a display plane
1947 * @dev_priv: i915 private structure
1948 * @plane: plane to disable
1949 * @pipe: pipe consuming the data
1951 * Disable @plane; should be an independent operation.
1953 static void intel_disable_plane(struct drm_i915_private
*dev_priv
,
1954 enum plane plane
, enum i915_pipe pipe
)
1959 reg
= DSPCNTR(plane
);
1960 val
= I915_READ(reg
);
1961 if ((val
& DISPLAY_PLANE_ENABLE
) == 0)
1964 I915_WRITE(reg
, val
& ~DISPLAY_PLANE_ENABLE
);
1965 intel_flush_display_plane(dev_priv
, plane
);
1966 intel_wait_for_vblank(dev_priv
->dev
, pipe
);
1969 static bool need_vtd_wa(struct drm_device
*dev
)
1971 #ifdef CONFIG_INTEL_IOMMU
1972 if (INTEL_INFO(dev
)->gen
>= 6 && intel_iommu_gfx_mapped
)
1979 intel_pin_and_fence_fb_obj(struct drm_device
*dev
,
1980 struct drm_i915_gem_object
*obj
,
1981 struct intel_ring_buffer
*pipelined
)
1983 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1987 switch (obj
->tiling_mode
) {
1988 case I915_TILING_NONE
:
1989 if (IS_BROADWATER(dev
) || IS_CRESTLINE(dev
))
1990 alignment
= 128 * 1024;
1991 else if (INTEL_INFO(dev
)->gen
>= 4)
1992 alignment
= 4 * 1024;
1994 alignment
= 64 * 1024;
1997 /* pin() will align the object as required by fence */
2001 /* Despite that we check this in framebuffer_init userspace can
2002 * screw us over and change the tiling after the fact. Only
2003 * pinned buffers can't change their tiling. */
2004 DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
2010 /* Note that the w/a also requires 64 PTE of padding following the
2011 * bo. We currently fill all unused PTE with the shadow page and so
2012 * we should always have valid PTE following the scanout preventing
2015 if (need_vtd_wa(dev
) && alignment
< 256 * 1024)
2016 alignment
= 256 * 1024;
2018 dev_priv
->mm
.interruptible
= false;
2019 ret
= i915_gem_object_pin_to_display_plane(obj
, alignment
, pipelined
);
2021 goto err_interruptible
;
2023 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2024 * fence, whereas 965+ only requires a fence if using
2025 * framebuffer compression. For simplicity, we always install
2026 * a fence as the cost is not that onerous.
2028 ret
= i915_gem_object_get_fence(obj
);
2032 i915_gem_object_pin_fence(obj
);
2034 dev_priv
->mm
.interruptible
= true;
2038 i915_gem_object_unpin(obj
);
2040 dev_priv
->mm
.interruptible
= true;
2044 void intel_unpin_fb_obj(struct drm_i915_gem_object
*obj
)
2046 i915_gem_object_unpin_fence(obj
);
2047 i915_gem_object_unpin(obj
);
2050 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2051 * is assumed to be a power-of-two. */
2052 unsigned long intel_gen4_compute_page_offset(int *x
, int *y
,
2053 unsigned int tiling_mode
,
2057 if (tiling_mode
!= I915_TILING_NONE
) {
2058 unsigned int tile_rows
, tiles
;
2063 tiles
= *x
/ (512/cpp
);
2066 return tile_rows
* pitch
* 8 + tiles
* 4096;
2068 unsigned int offset
;
2070 offset
= *y
* pitch
+ *x
* cpp
;
2072 *x
= (offset
& 4095) / cpp
;
2073 return offset
& -4096;
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long linear_offset;

        DRM_ERROR("Can't update plane %d in SAREA\n", plane);

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->pixel_format) {
        dspcntr |= DISPPLANE_8BPP;
    case DRM_FORMAT_XRGB1555:
    case DRM_FORMAT_ARGB1555:
        dspcntr |= DISPPLANE_BGRX555;
    case DRM_FORMAT_RGB565:
        dspcntr |= DISPPLANE_BGRX565;
    case DRM_FORMAT_XRGB8888:
    case DRM_FORMAT_ARGB8888:
        dspcntr |= DISPPLANE_BGRX888;
    case DRM_FORMAT_XBGR8888:
    case DRM_FORMAT_ABGR8888:
        dspcntr |= DISPPLANE_RGBX888;
    case DRM_FORMAT_XRGB2101010:
    case DRM_FORMAT_ARGB2101010:
        dspcntr |= DISPPLANE_BGRX101010;
    case DRM_FORMAT_XBGR2101010:
    case DRM_FORMAT_ABGR2101010:
        dspcntr |= DISPPLANE_RGBX101010;

    if (INTEL_INFO(dev)->gen >= 4) {
        if (obj->tiling_mode != I915_TILING_NONE)
            dspcntr |= DISPPLANE_TILED;
            dspcntr &= ~DISPPLANE_TILED;

    I915_WRITE(reg, dspcntr);

    linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

    if (INTEL_INFO(dev)->gen >= 4) {
        intel_crtc->dspaddr_offset =
            intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
                                           fb->bits_per_pixel / 8,
        linear_offset -= intel_crtc->dspaddr_offset;
        intel_crtc->dspaddr_offset = linear_offset;

    DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
                  obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
    I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
    if (INTEL_INFO(dev)->gen >= 4) {
        I915_MODIFY_DISPBASE(DSPSURF(plane),
                             obj->gtt_offset + intel_crtc->dspaddr_offset);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPLINOFF(plane), linear_offset);
        I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
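
/*
 * Editor's note: continuing the illustrative numbers from the page-offset
 * example above (not taken from real hardware), the gen4+ path here would
 * compute linear_offset = 410256 and dspaddr_offset = 409600, and therefore
 * write DSPSURF = obj->gtt_offset + 409600 and DSPLINOFF = 656, while the
 * pre-gen4 path collapses everything into the single DSPADDR write.
 */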
static int ironlake_update_plane(struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb, int x, int y)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_framebuffer *intel_fb;
    struct drm_i915_gem_object *obj;
    int plane = intel_crtc->plane;
    unsigned long linear_offset;

        DRM_ERROR("Can't update plane %d in SAREA\n", plane);

    intel_fb = to_intel_framebuffer(fb);
    obj = intel_fb->obj;

    reg = DSPCNTR(plane);
    dspcntr = I915_READ(reg);
    /* Mask out pixel format bits in case we change it */
    dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
    switch (fb->pixel_format) {
        dspcntr |= DISPPLANE_8BPP;
    case DRM_FORMAT_RGB565:
        dspcntr |= DISPPLANE_BGRX565;
    case DRM_FORMAT_XRGB8888:
    case DRM_FORMAT_ARGB8888:
        dspcntr |= DISPPLANE_BGRX888;
    case DRM_FORMAT_XBGR8888:
    case DRM_FORMAT_ABGR8888:
        dspcntr |= DISPPLANE_RGBX888;
    case DRM_FORMAT_XRGB2101010:
    case DRM_FORMAT_ARGB2101010:
        dspcntr |= DISPPLANE_BGRX101010;
    case DRM_FORMAT_XBGR2101010:
    case DRM_FORMAT_ABGR2101010:
        dspcntr |= DISPPLANE_RGBX101010;

    if (obj->tiling_mode != I915_TILING_NONE)
        dspcntr |= DISPPLANE_TILED;
        dspcntr &= ~DISPPLANE_TILED;

    dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

    I915_WRITE(reg, dspcntr);

    linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
    intel_crtc->dspaddr_offset =
        intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
                                       fb->bits_per_pixel / 8,
    linear_offset -= intel_crtc->dspaddr_offset;

    DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
                  obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
    I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
    I915_MODIFY_DISPBASE(DSPSURF(plane),
                         obj->gtt_offset + intel_crtc->dspaddr_offset);
    if (IS_HASWELL(dev)) {
        I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
        I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
        I915_WRITE(DSPLINOFF(plane), linear_offset);

/* Assume fb object is pinned & idle & fenced and just update base pointers */
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                           int x, int y, enum mode_set_atomic state)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (dev_priv->display.disable_fbc)
        dev_priv->display.disable_fbc(dev);
    intel_increase_pllclock(crtc);

    return dev_priv->display.update_plane(crtc, fb, x, y);

void intel_display_handle_reset(struct drm_device *dev)
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_crtc *crtc;

    /*
     * Flips in the rings have been nuked by the reset,
     * so complete all pending flips so that user space
     * will get its events and not get stuck.
     *
     * Also update the base address of all primary
     * planes to the last fb to make sure we're
     * showing the correct fb after a reset.
     *
     * Need to make two loops over the crtcs so that we
     * don't try to grab a crtc mutex before the
     * pending_flip_queue really got woken up.
     */
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum plane plane = intel_crtc->plane;

        intel_prepare_page_flip(dev, plane);
        intel_finish_page_flip_plane(dev, plane);

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        mutex_lock(&crtc->mutex);
        if (intel_crtc->active)
            dev_priv->display.update_plane(crtc, crtc->fb,
        mutex_unlock(&crtc->mutex);

intel_finish_fb(struct drm_framebuffer *old_fb)
    struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
    struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
    bool was_interruptible = dev_priv->mm.interruptible;

    /* Big Hammer, we also need to ensure that any pending
     * MI_WAIT_FOR_EVENT inside a user batch buffer on the
     * current scanout is retired before unpinning the old
     *
     * This should only fail upon a hung GPU, in which case we
     * can safely continue.
     */
    dev_priv->mm.interruptible = false;
    ret = i915_gem_object_finish_gpu(obj);
    dev_priv->mm.interruptible = was_interruptible;

static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_master_private *master_priv;
    drm_i915_private_t *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

    if (!dev->primary->master)

    master_priv = dev->primary->master->driver_priv;
    if (!master_priv->sarea_priv)

    if (!dev_priv->sarea_priv)

    switch (intel_crtc->pipe) {
        master_priv->sarea_priv->pipeA_x = x;
        master_priv->sarea_priv->pipeA_y = y;
        dev_priv->sarea_priv->planeA_x = x;
        dev_priv->sarea_priv->planeA_y = y;
        master_priv->sarea_priv->pipeB_x = x;
        master_priv->sarea_priv->pipeB_y = y;
        dev_priv->sarea_priv->planeB_x = x;
        dev_priv->sarea_priv->planeB_y = y;

intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                    struct drm_framebuffer *fb)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct drm_framebuffer *old_fb;

        DRM_ERROR("No FB bound\n");

    if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
        DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
                  INTEL_INFO(dev)->num_pipes);

    mutex_lock(&dev->struct_mutex);
    ret = intel_pin_and_fence_fb_obj(dev,
                                     to_intel_framebuffer(fb)->obj,
        mutex_unlock(&dev->struct_mutex);
        DRM_ERROR("pin & fence failed\n");

    ret = dev_priv->display.update_plane(crtc, fb, x, y);
        intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
        mutex_unlock(&dev->struct_mutex);
        DRM_ERROR("failed to update base address\n");

        intel_wait_for_vblank(dev, intel_crtc->pipe);
        intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);

    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

    intel_crtc_update_sarea_pos(crtc, x, y);

static void intel_fdi_normal_train(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;

    /* enable normal train */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    if (IS_IVYBRIDGE(dev)) {
        temp &= ~FDI_LINK_TRAIN_NONE_IVB;
        temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_NORMAL_CPT;
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_NONE;
    I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

    /* wait one idle pattern time */

    /* IVB wants error correction enabled */
    if (IS_IVYBRIDGE(dev))
        I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
                   FDI_FE_ERRC_ENABLE);

static void ivb_modeset_global_resources(struct drm_device *dev)
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *pipe_B_crtc =
        to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
    struct intel_crtc *pipe_C_crtc =
        to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);

    /* When everything is off disable fdi C so that we could enable fdi B
     * with all lanes. XXX: This misses the case where a pipe is not using
     * any pch resources and so doesn't need any fdi lanes. */
    if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
        WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

        temp = I915_READ(SOUTH_CHICKEN1);
        temp &= ~FDI_BC_BIFURCATION_SELECT;
        DRM_DEBUG_KMS("disabling fdi C rx\n");
        I915_WRITE(SOUTH_CHICKEN1, temp);

/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    u32 reg, temp, tries;

    /* FDI needs bits from pipe & plane first */
    assert_pipe_enabled(dev_priv, pipe);
    assert_plane_enabled(dev_priv, plane);

    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
     */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    /* Ironlake workaround, enable clock pointer after FDI enable*/
    I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
    I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
               FDI_RX_PHASE_SYNC_POINTER_EN);

    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if ((temp & FDI_RX_BIT_LOCK)) {
            DRM_DEBUG_KMS("FDI train 1 done.\n");
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
        DRM_ERROR("FDI train 1 fail!\n");

    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    reg = FDI_RX_IIR(pipe);
    for (tries = 0; tries < 5; tries++) {
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done.\n");
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done\n");
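
/*
 * Editor's note: the two polling loops above, and the equivalents in the
 * SNB/IVB trainers below, share one shape: read FDI_RX_IIR a bounded number
 * of times and acknowledge the lock bit by writing it back once it shows up.
 * The sketch below is a hypothetical helper illustrating that pattern only;
 * no such function exists in this driver.
 */
#if 0
static bool fdi_wait_for_iir_bit(struct drm_i915_private *dev_priv,
                                 int pipe, u32 bit, int tries)
{
    u32 reg = FDI_RX_IIR(pipe);

    while (tries--) {
        u32 temp = I915_READ(reg);

        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
        if (temp & bit) {
            /* write the bit back to ack it, as the trainers above do */
            I915_WRITE(reg, temp | bit);
            return true;
        }
    }
    return false;
}
#endif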
static const int snb_b_fdi_train_param[] = {
    FDI_LINK_TRAIN_400MV_0DB_SNB_B,
    FDI_LINK_TRAIN_400MV_6DB_SNB_B,
    FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
    FDI_LINK_TRAIN_800MV_0DB_SNB_B,

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;
    u32 reg, temp, i, retry;

    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
     */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    I915_WRITE(FDI_RX_MISC(pipe),
               FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        for (retry = 0; retry < 5; retry++) {
            reg = FDI_RX_IIR(pipe);
            temp = I915_READ(reg);
            DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
            if (temp & FDI_RX_BIT_LOCK) {
                I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
                DRM_DEBUG_KMS("FDI train 1 done.\n");
        DRM_ERROR("FDI train 1 fail!\n");

    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_2;
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_2;
    I915_WRITE(reg, temp);

    for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        for (retry = 0; retry < 5; retry++) {
            reg = FDI_RX_IIR(pipe);
            temp = I915_READ(reg);
            DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
            if (temp & FDI_RX_SYMBOL_LOCK) {
                I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
                DRM_DEBUG_KMS("FDI train 2 done.\n");
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;

    /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
     */
    reg = FDI_RX_IMR(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_RX_SYMBOL_LOCK;
    temp &= ~FDI_RX_BIT_LOCK;
    I915_WRITE(reg, temp);

    DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
                  I915_READ(FDI_RX_IIR(pipe)));

    /* enable CPU FDI TX and PCH FDI RX */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
    temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    temp |= FDI_COMPOSITE_SYNC;
    I915_WRITE(reg, temp | FDI_TX_ENABLE);

    I915_WRITE(FDI_RX_MISC(pipe),
               FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_AUTO;
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
    temp |= FDI_COMPOSITE_SYNC;
    I915_WRITE(reg, temp | FDI_RX_ENABLE);

    for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_BIT_LOCK ||
            (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
            I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
            DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
        DRM_ERROR("FDI train 1 fail!\n");

    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE_IVB;
    temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
    temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
    temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
    temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
    I915_WRITE(reg, temp);

    for (i = 0; i < 4; i++) {
        reg = FDI_TX_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
        temp |= snb_b_fdi_train_param[i];
        I915_WRITE(reg, temp);

        reg = FDI_RX_IIR(pipe);
        temp = I915_READ(reg);
        DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

        if (temp & FDI_RX_SYMBOL_LOCK) {
            I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
            DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
        DRM_ERROR("FDI train 2 fail!\n");

    DRM_DEBUG_KMS("FDI train done.\n");

static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
    struct drm_device *dev = intel_crtc->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe = intel_crtc->pipe;

    /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~((0x7 << 19) | (0x7 << 16));
    temp |= (intel_crtc->fdi_lanes - 1) << 19;
    temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
    I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

    /* Switch from Rawclk to PCDclk */
    temp = I915_READ(reg);
    I915_WRITE(reg, temp | FDI_PCDCLK);

    /* Enable CPU FDI TX PLL, always on for Ironlake */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    if ((temp & FDI_TX_PLL_ENABLE) == 0) {
        I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
    struct drm_device *dev = intel_crtc->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe = intel_crtc->pipe;

    /* Switch from PCDclk to Rawclk */
    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_PCDCLK);

    /* Disable CPU FDI TX PLL */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

    /* Wait for the clocks to turn off. */

static void ironlake_fdi_disable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;

    /* disable CPU FDI tx and PCH FDI rx */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    I915_WRITE(reg, temp & ~FDI_TX_ENABLE);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~(0x7 << 16);
    temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
    I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

    /* Ironlake workaround, disable clock pointer after downing FDI */
    if (HAS_PCH_IBX(dev)) {
        I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

    /* still set train pattern 1 */
    reg = FDI_TX_CTL(pipe);
    temp = I915_READ(reg);
    temp &= ~FDI_LINK_TRAIN_NONE;
    temp |= FDI_LINK_TRAIN_PATTERN_1;
    I915_WRITE(reg, temp);

    reg = FDI_RX_CTL(pipe);
    temp = I915_READ(reg);
    if (HAS_PCH_CPT(dev)) {
        temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
        temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
        temp &= ~FDI_LINK_TRAIN_NONE;
        temp |= FDI_LINK_TRAIN_PATTERN_1;
    /* BPC in FDI rx is consistent with that in PIPECONF */
    temp &= ~(0x07 << 16);
    temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
    I915_WRITE(reg, temp);

static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

    if (i915_reset_in_progress(&dev_priv->gpu_error) ||
        intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))

    lockmgr(&dev->event_lock, LK_EXCLUSIVE);
    pending = to_intel_crtc(crtc)->unpin_work != NULL;
    lockmgr(&dev->event_lock, LK_RELEASE);

static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (crtc->fb == NULL)

    WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

    wait_event(dev_priv->pending_flip_queue,
               !intel_crtc_has_pending_flip(crtc));

    mutex_lock(&dev->struct_mutex);
    intel_finish_fb(crtc->fb);
    mutex_unlock(&dev->struct_mutex);
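
/*
 * Editor's note: intel_crtc_has_pending_flip() above doubles as the
 * wait_event() predicate here. Its early check of i915_reset_in_progress()
 * and of a stale reset_counter presumably lets the wait bail out across a
 * GPU reset instead of sleeping forever on a flip whose ring contents were
 * just nuked (compare intel_display_handle_reset() earlier in this file).
 */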
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    u32 divsel, phaseinc, auxdiv, phasedir = 0;

    mutex_lock(&dev_priv->dpio_lock);

    /* It is necessary to ungate the pixclk gate prior to programming
     * the divisors, and gate it back when it is done.
     */
    I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

    /* Disable SSCCTL */
    intel_sbi_write(dev_priv, SBI_SSCCTL6,
                    intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |

    /* 20MHz is a corner case which is out of range for the 7-bit divisor */
    if (crtc->mode.clock == 20000) {

        /* The iCLK virtual clock root frequency is in MHz,
         * but the crtc->mode.clock is in kHz. To get the divisors,
         * it is necessary to divide one by another, so we
         * convert the virtual clock precision to kHz here for higher
         */
        u32 iclk_virtual_root_freq = 172800 * 1000;
        u32 iclk_pi_range = 64;
        u32 desired_divisor, msb_divisor_value, pi_value;

        desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
        msb_divisor_value = desired_divisor / iclk_pi_range;
        pi_value = desired_divisor % iclk_pi_range;

        divsel = msb_divisor_value - 2;
        phaseinc = pi_value;

    /* This should not happen with any sane values */
    WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
            ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
    WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
            ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

    DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",

    /* Program SSCDIVINTPHASE6 */
    temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
    temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
    temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
    temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
    temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
    temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
    temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
    intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

    /* Program SSCAUXDIV */
    temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
    temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
    temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
    intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

    /* Enable modulator and associated divider */
    temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
    temp &= ~SBI_SSCCTL_DISABLE;
    intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

    /* Wait for initialization time */

    I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

    mutex_unlock(&dev_priv->dpio_lock);
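
/*
 * Editor's note: a worked example of the divisor math above, for an assumed
 * crtc->mode.clock of 108000 kHz:
 *
 *     desired_divisor   = 172800000 / 108000 = 1600
 *     msb_divisor_value = 1600 / 64          = 25
 *     pi_value          = 1600 % 64          = 0
 *     divsel            = 25 - 2             = 23
 *     phaseinc          = 0
 *
 * which reproduces the target exactly: 172800000 kHz / 1600 = 108000 kHz.
 */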
/*
 * Enable PCH resources required for PCH ports:
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;

    assert_transcoder_disabled(dev_priv, pipe);

    /* Write the TU size bits before fdi link training, so that error
     * detection works. */
    I915_WRITE(FDI_RX_TUSIZE1(pipe),
               I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

    /* For PCH output, training FDI link */
    dev_priv->display.fdi_link_train(crtc);

    /* XXX: pch pll's can be enabled any time before we enable the PCH
     * transcoder, and we actually should do this to not upset any PCH
     * transcoder that already uses the clock when we share it.
     *
     * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
     * unconditionally resets the pll - we need that to have the right LVDS
     * enable sequence. */
    ironlake_enable_pch_pll(intel_crtc);

    if (HAS_PCH_CPT(dev)) {
        temp = I915_READ(PCH_DPLL_SEL);
            temp |= TRANSA_DPLL_ENABLE;
            sel = TRANSA_DPLLB_SEL;
            temp |= TRANSB_DPLL_ENABLE;
            sel = TRANSB_DPLLB_SEL;
            temp |= TRANSC_DPLL_ENABLE;
            sel = TRANSC_DPLLB_SEL;
        if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
        I915_WRITE(PCH_DPLL_SEL, temp);

    /* set transcoder timing, panel must allow it */
    assert_panel_unlocked(dev_priv, pipe);
    I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
    I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
    I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));

    I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
    I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
    I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
    I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));

    intel_fdi_normal_train(crtc);

    /* For PCH DP, enable TRANS_DP_CTL */
    if (HAS_PCH_CPT(dev) &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
         intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
        u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(TRANS_DP_PORT_SEL_MASK |
                  TRANS_DP_SYNC_MASK |
        temp |= (TRANS_DP_OUTPUT_ENABLE |
                 TRANS_DP_ENH_FRAMING);
        temp |= bpc << 9; /* same format but at 11:9 */

        if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
            temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
        if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
            temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

        switch (intel_trans_dp_port_sel(crtc)) {
            temp |= TRANS_DP_PORT_SEL_B;
            temp |= TRANS_DP_PORT_SEL_C;
            temp |= TRANS_DP_PORT_SEL_D;

        I915_WRITE(reg, temp);

    ironlake_enable_pch_transcoder(dev_priv, pipe);

static void lpt_pch_enable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

    assert_transcoder_disabled(dev_priv, TRANSCODER_A);

    lpt_program_iclkip(crtc);

    /* Set transcoder timing. */
    I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
    I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
    I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));

    I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
    I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
    I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
    I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));

    lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);

static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
    struct intel_pch_pll *pll = intel_crtc->pch_pll;

    if (pll->refcount == 0) {
        WARN(1, "bad PCH PLL refcount\n");

    intel_crtc->pch_pll = NULL;

static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
    struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
    struct intel_pch_pll *pll;

    pll = intel_crtc->pch_pll;
        DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
                      intel_crtc->base.base.id, pll->pll_reg);

    if (HAS_PCH_IBX(dev_priv->dev)) {
        /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
        i = intel_crtc->pipe;
        pll = &dev_priv->pch_plls[i];

        DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
                      intel_crtc->base.base.id, pll->pll_reg);

    for (i = 0; i < dev_priv->num_pch_pll; i++) {
        pll = &dev_priv->pch_plls[i];

        /* Only want to check enabled timings first */
        if (pll->refcount == 0)

        if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
            fp == I915_READ(pll->fp0_reg)) {
            DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, active %d)\n",
                          intel_crtc->base.base.id,
                          pll->pll_reg, pll->refcount, pll->active);

    /* Ok no matching timings, maybe there's a free one? */
    for (i = 0; i < dev_priv->num_pch_pll; i++) {
        pll = &dev_priv->pch_plls[i];
        if (pll->refcount == 0) {
            DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
                          intel_crtc->base.base.id, pll->pll_reg);

    intel_crtc->pch_pll = pll;

    DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
prepare: /* separate function? */
    DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);

    /* Wait for the clocks to stabilize before rewriting the regs */
    I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
    POSTING_READ(pll->pll_reg);

    I915_WRITE(pll->fp0_reg, fp);
    I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
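
/*
 * Editor's note on the sharing scheme above: intel_get_pch_pll() prefers, in
 * order, the PLL this crtc already holds, then a PLL whose programmed
 * DPLL/FP0 values already match (so crtcs with identical timings can share
 * it), and finally any PLL with refcount == 0; on IBX the fixed pipe->PLL
 * mapping overrides all of that. intel_put_pch_pll() is the matching release
 * and warns if the refcount is already zero.
 */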
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
    struct drm_i915_private *dev_priv = dev->dev_private;
    int dslreg = PIPEDSL(pipe);

    temp = I915_READ(dslreg);
    if (wait_for(I915_READ(dslreg) != temp, 5)) {
        if (wait_for(I915_READ(dslreg) != temp, 5))
            DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);

static void ironlake_crtc_enable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    WARN_ON(!crtc->enabled);

    if (intel_crtc->active)

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        temp = I915_READ(PCH_LVDS);
        if ((temp & LVDS_PORT_EN) == 0)
            I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);

    if (intel_crtc->config.has_pch_encoder) {
        /* Note: FDI PLL enabling _must_ be done before we enable the
         * cpu pipes, hence this is separate from all the other fdi/pch
         */
        ironlake_fdi_pll_enable(intel_crtc);

        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        if (encoder->pre_enable)
            encoder->pre_enable(encoder);

    /* Enable panel fitting for LVDS */
    if (dev_priv->pch_pf_size &&
        (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
         intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are broken,
         */
        if (IS_IVYBRIDGE(dev))
            I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
                       PF_PIPE_SEL_IVB(pipe));
            I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
        I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
        I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     */
    intel_crtc_load_lut(crtc);

    intel_enable_pipe(dev_priv, pipe,
                      intel_crtc->config.has_pch_encoder);
    intel_enable_plane(dev_priv, plane, pipe);

    if (intel_crtc->config.has_pch_encoder)
        ironlake_pch_enable(crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

    intel_crtc_update_cursor(crtc, true);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->enable(encoder);

    if (HAS_PCH_CPT(dev))
        intel_cpt_verify_modeset(dev, intel_crtc->pipe);

    /*
     * There seems to be a race in PCH platform hw (at least on some
     * outputs) where an enabled pipe still completes any pageflip right
     * away (as if the pipe is off) instead of waiting for vblank. As soon
     * as the first vblank happened, everything works as expected. Hence just
     * wait for one vblank before returning to avoid strange things
     */
    intel_wait_for_vblank(dev, intel_crtc->pipe);

static void haswell_crtc_enable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    WARN_ON(!crtc->enabled);

    if (intel_crtc->active)

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    if (intel_crtc->config.has_pch_encoder)
        dev_priv->display.fdi_link_train(crtc);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        if (encoder->pre_enable)
            encoder->pre_enable(encoder);

    intel_ddi_enable_pipe_clock(intel_crtc);

    /* Enable panel fitting for eDP */
    if (dev_priv->pch_pf_size &&
        intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are broken,
         */
        I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
                   PF_PIPE_SEL_IVB(pipe));
        I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
        I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     */
    intel_crtc_load_lut(crtc);

    intel_ddi_set_pipe_settings(crtc);
    intel_ddi_enable_transcoder_func(crtc);

    intel_enable_pipe(dev_priv, pipe,
                      intel_crtc->config.has_pch_encoder);
    intel_enable_plane(dev_priv, plane, pipe);

    if (intel_crtc->config.has_pch_encoder)
        lpt_pch_enable(crtc);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

    intel_crtc_update_cursor(crtc, true);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->enable(encoder);

    /*
     * There seems to be a race in PCH platform hw (at least on some
     * outputs) where an enabled pipe still completes any pageflip right
     * away (as if the pipe is off) instead of waiting for vblank. As soon
     * as the first vblank happened, everything works as expected. Hence just
     * wait for one vblank before returning to avoid strange things
     */
    intel_wait_for_vblank(dev, intel_crtc->pipe);

static void ironlake_crtc_disable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    if (!intel_crtc->active)

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->disable(encoder);

    intel_crtc_wait_for_pending_flips(crtc);
    drm_vblank_off(dev, pipe);
    intel_crtc_update_cursor(crtc, false);

    intel_disable_plane(dev_priv, plane, pipe);

    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_pipe(dev_priv, pipe);

    I915_WRITE(PF_CTL(pipe), 0);
    I915_WRITE(PF_WIN_SZ(pipe), 0);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        if (encoder->post_disable)
            encoder->post_disable(encoder);

    ironlake_fdi_disable(crtc);

    ironlake_disable_pch_transcoder(dev_priv, pipe);

    if (HAS_PCH_CPT(dev)) {
        /* disable TRANS_DP_CTL */
        reg = TRANS_DP_CTL(pipe);
        temp = I915_READ(reg);
        temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
        temp |= TRANS_DP_PORT_SEL_NONE;
        I915_WRITE(reg, temp);

        /* disable DPLL_SEL */
        temp = I915_READ(PCH_DPLL_SEL);
            temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
            temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
            /* C shares PLL A or B */
            temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
        I915_WRITE(PCH_DPLL_SEL, temp);

    /* disable PCH DPLL */
    intel_disable_pch_pll(intel_crtc);

    ironlake_fdi_pll_disable(intel_crtc);

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

static void haswell_crtc_disable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;
    enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

    if (!intel_crtc->active)

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->disable(encoder);

    intel_crtc_wait_for_pending_flips(crtc);
    drm_vblank_off(dev, pipe);
    intel_crtc_update_cursor(crtc, false);

    intel_disable_plane(dev_priv, plane, pipe);

    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_pipe(dev_priv, pipe);

    intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

    /* XXX: Once we have proper panel fitter state tracking implemented with
     * hardware state read/check support we should switch to only disable
     * the panel fitter when we know it's used. */
    if (intel_using_power_well(dev)) {
        I915_WRITE(PF_CTL(pipe), 0);
        I915_WRITE(PF_WIN_SZ(pipe), 0);

    intel_ddi_disable_pipe_clock(intel_crtc);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        if (encoder->post_disable)
            encoder->post_disable(encoder);

    if (intel_crtc->config.has_pch_encoder) {
        lpt_disable_pch_transcoder(dev_priv);
        intel_ddi_fdi_disable(crtc);

    intel_crtc->active = false;
    intel_update_watermarks(dev);

    mutex_lock(&dev->struct_mutex);
    intel_update_fbc(dev);
    mutex_unlock(&dev->struct_mutex);

static void ironlake_crtc_off(struct drm_crtc *crtc)
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    intel_put_pch_pll(intel_crtc);

static void haswell_crtc_off(struct drm_crtc *crtc)
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

    /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
     * start using it. */
    intel_crtc->config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;

    intel_ddi_put_crtc_pll(crtc);

static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
    if (!enable && intel_crtc->overlay) {
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        dev_priv->mm.interruptible = false;
        (void) intel_overlay_switch_off(intel_crtc->overlay);
        dev_priv->mm.interruptible = true;
        mutex_unlock(&dev->struct_mutex);

    /* Let userspace switch the overlay on again. In most cases userspace
     * has to recompute where to put it anyway.
     */

/*
 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
 * cursor plane briefly if not already running after enabling the display
 *
 * This workaround avoids occasional blank screens when self refresh is
 */
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
    u32 cntl = I915_READ(CURCNTR(pipe));

    if ((cntl & CURSOR_MODE) == 0) {
        u32 fw_bcl_self = I915_READ(FW_BLC_SELF);

        I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
        I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
        intel_wait_for_vblank(dev_priv->dev, pipe);
        I915_WRITE(CURCNTR(pipe), cntl);
        I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
        I915_WRITE(FW_BLC_SELF, fw_bcl_self);

static void i9xx_crtc_enable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    WARN_ON(!crtc->enabled);

    if (intel_crtc->active)

    intel_crtc->active = true;
    intel_update_watermarks(dev);

    intel_enable_pll(dev_priv, pipe);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        if (encoder->pre_enable)
            encoder->pre_enable(encoder);

    intel_enable_pipe(dev_priv, pipe, false);
    intel_enable_plane(dev_priv, plane, pipe);

    g4x_fixup_plane(dev_priv, pipe);

    intel_crtc_load_lut(crtc);
    intel_update_fbc(dev);

    /* Give the overlay scaler a chance to enable if it's on this pipe */
    intel_crtc_dpms_overlay(intel_crtc, true);
    intel_crtc_update_cursor(crtc, true);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->enable(encoder);

static void i9xx_pfit_disable(struct intel_crtc *crtc)
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    enum i915_pipe pipe;
    uint32_t pctl = I915_READ(PFIT_CONTROL);

    assert_pipe_disabled(dev_priv, crtc->pipe);

    if (INTEL_INFO(dev)->gen >= 4)
        pipe = (pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT;

    if (pipe == crtc->pipe) {
        DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", pctl);
        I915_WRITE(PFIT_CONTROL, 0);

static void i9xx_crtc_disable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;
    int plane = intel_crtc->plane;

    if (!intel_crtc->active)

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->disable(encoder);

    /* Give the overlay scaler a chance to disable if it's on this pipe */
    intel_crtc_wait_for_pending_flips(crtc);
    drm_vblank_off(dev, pipe);
    intel_crtc_dpms_overlay(intel_crtc, false);
    intel_crtc_update_cursor(crtc, false);

    if (dev_priv->cfb_plane == plane)
        intel_disable_fbc(dev);

    intel_disable_plane(dev_priv, plane, pipe);
    intel_disable_pipe(dev_priv, pipe);

    i9xx_pfit_disable(intel_crtc);

    intel_disable_pll(dev_priv, pipe);

    intel_crtc->active = false;
    intel_update_fbc(dev);
    intel_update_watermarks(dev);

static void i9xx_crtc_off(struct drm_crtc *crtc)

static void intel_crtc_update_sarea(struct drm_crtc *crtc,
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct drm_i915_master_private *master_priv;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;

    if (!dev->primary->master)

    master_priv = dev->primary->master->driver_priv;
    if (!master_priv->sarea_priv)

    if (!dev_priv->sarea_priv)

        master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
        master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
        dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
        dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
        master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
        master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
        dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
        dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
        DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));

/*
 * Sets the power management mode of the pipe and plane.
 */
void intel_crtc_update_dpms(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_encoder *intel_encoder;
    bool enable = false;

    for_each_encoder_on_crtc(dev, crtc, intel_encoder)
        enable |= intel_encoder->connectors_active;

        dev_priv->display.crtc_enable(crtc);
        dev_priv->display.crtc_disable(crtc);

    intel_crtc_update_sarea(crtc, enable);

static void intel_crtc_noop(struct drm_crtc *crtc)

static void intel_crtc_disable(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_connector *connector;
    struct drm_i915_private *dev_priv = dev->dev_private;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

    /* crtc should still be enabled when we disable it. */
    WARN_ON(!crtc->enabled);

    intel_crtc->eld_vld = false;
    dev_priv->display.crtc_disable(crtc);
    intel_crtc_update_sarea(crtc, false);
    dev_priv->display.off(crtc);

    assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
    assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

        mutex_lock(&dev->struct_mutex);
        intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
        mutex_unlock(&dev->struct_mutex);

    /* Update computed state. */
    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        if (!connector->encoder || !connector->encoder->crtc)

        if (connector->encoder->crtc != crtc)

        connector->dpms = DRM_MODE_DPMS_OFF;
        to_intel_encoder(connector->encoder)->connectors_active = false;

void intel_modeset_disable(struct drm_device *dev)
    struct drm_crtc *crtc;

    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
            intel_crtc_disable(crtc);

void intel_encoder_noop(struct drm_encoder *encoder)

void intel_encoder_destroy(struct drm_encoder *encoder)
    struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

    drm_encoder_cleanup(encoder);
    drm_free(intel_encoder, M_DRM);

/* Simple dpms helper for encoders with just one connector, no cloning and only
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
 * state of the entire output pipe. */
void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
    if (mode == DRM_MODE_DPMS_ON) {
        encoder->connectors_active = true;

        intel_crtc_update_dpms(encoder->base.crtc);
        encoder->connectors_active = false;

        intel_crtc_update_dpms(encoder->base.crtc);

/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
    if (connector->get_hw_state(connector)) {
        struct intel_encoder *encoder = connector->encoder;
        struct drm_crtc *crtc;
        bool encoder_enabled;
        enum i915_pipe pipe;

        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.base.id,
                      drm_get_connector_name(&connector->base));

        WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
             "wrong connector dpms state\n");
        WARN(connector->base.encoder != &encoder->base,
             "active connector not linked to encoder\n");
        WARN(!encoder->connectors_active,
             "encoder->connectors_active not set\n");

        encoder_enabled = encoder->get_hw_state(encoder, &pipe);
        WARN(!encoder_enabled, "encoder not enabled\n");
        if (WARN_ON(!encoder->base.crtc))

        crtc = encoder->base.crtc;

        WARN(!crtc->enabled, "crtc not enabled\n");
        WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
        WARN(pipe != to_intel_crtc(crtc)->pipe,
             "encoder active on the wrong pipe\n");

/* Even simpler default implementation, if there's really no special case to
 */
void intel_connector_dpms(struct drm_connector *connector, int mode)
    struct intel_encoder *encoder = intel_attached_encoder(connector);

    /* All the simple cases only support two dpms states. */
    if (mode != DRM_MODE_DPMS_ON)
        mode = DRM_MODE_DPMS_OFF;

    if (mode == connector->dpms)

    connector->dpms = mode;

    /* Only need to change hw state when actually enabled */
    if (encoder->base.crtc)
        intel_encoder_dpms(encoder, mode);
        WARN_ON(encoder->connectors_active != false);

    intel_modeset_check_state(connector->dev);

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
    enum i915_pipe pipe = 0;
    struct intel_encoder *encoder = connector->encoder;

    return encoder->get_hw_state(encoder, &pipe);

static bool intel_crtc_compute_config(struct drm_crtc *crtc,
                                      struct intel_crtc_config *pipe_config)
    struct drm_device *dev = crtc->dev;
    struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;

    if (HAS_PCH_SPLIT(dev)) {
        /* FDI link clock is fixed at 2.7G */
        if (pipe_config->requested_mode.clock * 3
            > IRONLAKE_FDI_FREQ * 4)

    /* All interlaced capable intel hw wants timings in frames. Note though
     * that intel_lvds_mode_fixup does some funny tricks with the crtc
     * timings, so we need to be careful not to clobber these. */
    if (!pipe_config->timings_set)
        drm_mode_set_crtcinfo(adjusted_mode, 0);

    /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
     * with a hsync front porch of 0.
     */
    if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
        adjusted_mode->hsync_start == adjusted_mode->hdisplay)

    if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
        pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
    } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
        /* only an 8bpc pipe, with 6bpc dither through the panel fitter
         */
        pipe_config->pipe_bpp = 8*3;
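
/*
 * Editor's note: a small illustration of the bpp clamping above (requested
 * value assumed): a sink asking for a 12 bpc / 36 bpp pipe is clamped to
 * pipe_bpp = 10*3 = 30 on G4X/VLV, and to 8*3 = 24 on gen4 and earlier,
 * which only have an 8 bpc pipe.
 */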
static int valleyview_get_display_clock_speed(struct drm_device *dev)
    return 400000; /* FIXME */

static int i945_get_display_clock_speed(struct drm_device *dev)

static int i915_get_display_clock_speed(struct drm_device *dev)

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)

static int i915gm_get_display_clock_speed(struct drm_device *dev)
    pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

    if (gcfgc & GC_LOW_FREQUENCY_ENABLE)

    switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
    case GC_DISPLAY_CLOCK_333_MHZ:
    case GC_DISPLAY_CLOCK_190_200_MHZ:

static int i865_get_display_clock_speed(struct drm_device *dev)

static int i855_get_display_clock_speed(struct drm_device *dev)
    /* Assume that the hardware is in the high speed state. This
     * should be the default.
     */
    switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
    case GC_CLOCK_133_200:
    case GC_CLOCK_100_200:
    case GC_CLOCK_166_250:
    case GC_CLOCK_100_133:

    /* Shouldn't happen */

static int i830_get_display_clock_speed(struct drm_device *dev)

intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
    while (*num > DATA_LINK_M_N_MASK ||
           *den > DATA_LINK_M_N_MASK) {

static void compute_m_n(unsigned int m, unsigned int n,
                        uint32_t *ret_m, uint32_t *ret_n)
    *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
    *ret_m = div_u64((uint64_t) m * *ret_n, n);
    intel_reduce_m_n_ratio(ret_m, ret_n);

intel_link_compute_m_n(int bits_per_pixel, int nlanes,
                       int pixel_clock, int link_clock,
                       struct intel_link_m_n *m_n)
    compute_m_n(bits_per_pixel * pixel_clock,
                link_clock * nlanes * 8,
                &m_n->gmch_m, &m_n->gmch_n);

    compute_m_n(pixel_clock, link_clock,
                &m_n->link_m, &m_n->link_n);
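
/*
 * Editor's note: a worked example of compute_m_n() for the link M/N pair,
 * with illustrative clocks pixel_clock = 135000 kHz, link_clock = 270000 kHz:
 *
 *     ret_n = min(roundup_pow_of_two(270000), DATA_LINK_N_MAX) = 524288
 *     ret_m = 135000 * 524288 / 270000                         = 262144
 *
 * so link_m/link_n = 262144/524288 = 0.5, the exact pixel-to-link clock
 * ratio (assuming DATA_LINK_N_MAX is at least 2^19). The gmch pair is
 * derived the same way from bits_per_pixel * pixel_clock versus
 * link_clock * nlanes * 8.
 */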
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
    if (i915_panel_use_ssc >= 0)
        return i915_panel_use_ssc != 0;
    return dev_priv->lvds_use_ssc
        && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);

static int vlv_get_refclk(struct drm_crtc *crtc)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int refclk = 27000; /* for DP & HDMI */

    return 100000; /* only one validated so far */

    if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
    } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
        if (intel_panel_use_ssc(dev_priv))
    } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {

static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = dev->dev_private;

    if (IS_VALLEYVIEW(dev)) {
        refclk = vlv_get_refclk(crtc);
    } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
               intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
        refclk = dev_priv->lvds_ssc_freq * 1000;
        DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
    } else if (!IS_GEN2(dev)) {

static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc *crtc)
    unsigned dotclock = crtc->config.adjusted_mode.clock;
    struct dpll *clock = &crtc->config.dpll;

    /* SDVO TV has fixed PLL values that depend on its clock range;
     * this mirrors the VBIOS setting. */
    if (dotclock >= 100000 && dotclock < 140500) {
    } else if (dotclock >= 140500 && dotclock <= 200000) {

    crtc->config.clock_set = true;

static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
                                     intel_clock_t *reduced_clock)
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = dev->dev_private;
    int pipe = crtc->pipe;
    struct dpll *clock = &crtc->config.dpll;

    if (IS_PINEVIEW(dev)) {
        fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
            fp2 = (1 << reduced_clock->n) << 16 |
                  reduced_clock->m1 << 8 | reduced_clock->m2;
        fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
            fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |

    I915_WRITE(FP0(pipe), fp);

    crtc->lowfreq_avail = false;
    if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
        reduced_clock && i915_powersave) {
        I915_WRITE(FP1(pipe), fp2);
        crtc->lowfreq_avail = true;
        I915_WRITE(FP1(pipe), fp);
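
/*
 * Editor's note: an illustrative FP encoding for the writes above, with
 * made-up divider values clock->n = 3, m1 = 16, m2 = 8. On most platforms:
 *
 *     fp = 3 << 16 | 16 << 8 | 8 = 0x00031008
 *
 * Pineview stores (1 << n) in the N field instead, so the same dividers
 * encode as (1 << 3) << 16 | 16 << 8 | 8 = 0x00081008.
 */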
4268 static void intel_dp_set_m_n(struct intel_crtc
*crtc
)
4270 if (crtc
->config
.has_pch_encoder
)
4271 intel_pch_transcoder_set_m_n(crtc
, &crtc
->config
.dp_m_n
);
4273 intel_cpu_transcoder_set_m_n(crtc
, &crtc
->config
.dp_m_n
);
static void vlv_update_pll(struct intel_crtc *crtc)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 dpll, mdiv, pdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;

	lockmgr(&dev_priv->dpio_lock, LK_EXCLUSIVE);

	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;
	dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
	dpll |= DPLL_REFA_CLK_ENABLE_VLV;
	dpll |= DPLL_INTEGRATED_CLOCK_VLV;

	I915_WRITE(DPLL(pipe), dpll);
	POSTING_READ(DPLL(pipe));

	bestn = crtc->config.dpll.n;
	bestm1 = crtc->config.dpll.m1;
	bestm2 = crtc->config.dpll.m2;
	bestp1 = crtc->config.dpll.p1;
	bestp2 = crtc->config.dpll.p2;

	/*
	 * In Valleyview PLL and program lane counter registers are exposed
	 * through DPIO interface
	 */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_POST_DIV_SHIFT);
	mdiv |= (1 << DPIO_K_SHIFT);
	mdiv |= DPIO_ENABLE_CALIBRATION;
	intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);

	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);

	pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
		(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
		(5 << DPIO_CLK_BIAS_CTL_SHIFT);
	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);

	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);

	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll);
	POSTING_READ(DPLL(pipe));
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);

	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);

	if (crtc->config.has_dp_encoder)
		intel_dp_set_m_n(crtc);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));

	if (crtc->config.pixel_multiplier > 1) {
		temp = (crtc->config.pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	I915_WRITE(DPLL_MD(pipe), temp);
	POSTING_READ(DPLL_MD(pipe));

	/* Now program lane control registers */
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)
	    || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) {
		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) {
		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);

	lockmgr(&dev_priv->dpio_lock, LK_RELEASE);
static void i9xx_update_pll(struct intel_crtc *crtc,
			    intel_clock_t *reduced_clock,
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	int pipe = crtc->pipe;
	struct dpll *clock = &crtc->config.dpll;

	i9xx_update_pll_dividers(crtc, reduced_clock);

	is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
		intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if ((crtc->config.pixel_multiplier > 1) &&
	    (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) {
		dpll |= (crtc->config.pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
		dpll |= DPLL_DVO_HIGH_SPEED;
	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	if (IS_G4X(dev) && reduced_clock)
		dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	switch (clock->p2) {
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	if (is_sdvo && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_TVOUT))
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
	else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (crtc->config.has_dp_encoder)
		intel_dp_set_m_n(crtc);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));

	if (INTEL_INFO(dev)->gen >= 4) {
		if (crtc->config.pixel_multiplier > 1) {
			temp = (crtc->config.pixel_multiplier - 1)
				<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		I915_WRITE(DPLL_MD(pipe), temp);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
static void i8xx_update_pll(struct intel_crtc *crtc,
			    struct drm_display_mode *adjusted_mode,
			    intel_clock_t *reduced_clock,
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	int pipe = crtc->pipe;
	struct dpll *clock = &crtc->config.dpll;

	i9xx_update_pll_dividers(crtc, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			dpll |= PLL_P1_DIVIDE_BY_TWO;
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			dpll |= PLL_P2_DIVIDE_BY_4;

	if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
	POSTING_READ(DPLL(pipe));

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(DPLL(pipe), dpll);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
				   struct drm_display_mode *mode,
				   struct drm_display_mode *adjusted_mode)
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
	uint32_t vsyncshift;

	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal / 2;

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
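/*
 * The timing registers written above pack two values per register, each
 * minus one: the active count in the low word and the total (or blank/sync
 * end) count in the high word. As an illustrative example, a 1920-wide mode
 * with an htotal of 2200 would program
 * HTOTAL = (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f.
 */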
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	pipeconf = I915_READ(PIPECONF(intel_crtc->pipe));

	if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 */
		if (intel_crtc->config.requested_mode.clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
	if (intel_crtc->config.has_dp_encoder) {
		if (intel_crtc->config.dither) {
			pipeconf |= PIPECONF_6BPC |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

	if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(&intel_crtc->base,
						      INTEL_OUTPUT_EDP)) {
		if (intel_crtc->config.dither) {
			pipeconf |= PIPECONF_6BPC |
				    I965_PIPECONF_ACTIVE;

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (!IS_GEN2(dev) &&
	    intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev)) {
		if (intel_crtc->config.limited_color_range)
			pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
			pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_framebuffer *fb)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	bool ok, has_reduced_clock = false, is_sdvo = false;
	bool is_lvds = false, is_tv = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			if (encoder->needs_tv_clock)
		case INTEL_OUTPUT_TVOUT:

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE. The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
		DRM_ERROR("Couldn't find PLL settings for mode!\n");

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,

	/* Compat-code for transition, will disappear. */
	if (!intel_crtc->config.clock_set) {
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;

	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(intel_crtc);

		i8xx_update_pll(intel_crtc, adjusted_mode,
				has_reduced_clock ? &reduced_clock : NULL,
	else if (IS_VALLEYVIEW(dev))
		vlv_update_pll(intel_crtc);
		i9xx_update_pll(intel_crtc,
				has_reduced_clock ? &reduced_clock : NULL,

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (!IS_VALLEYVIEW(dev)) {
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
			dspcntr |= DISPPLANE_SEL_PIPE_B;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);

	i9xx_set_pipeconf(intel_crtc);

	intel_enable_pipe(dev_priv, pipe, false);

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	intel_update_watermarks(dev);
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_config *pipe_config)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
static void ironlake_init_pch_refclk(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
		case INTEL_OUTPUT_EDP:
			if (intel_encoder_is_pch_edp(&encoder->base))

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc = has_ck505;

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
		final |= DREF_NONSPREAD_CK505_ENABLE;
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		/* Always enable nonspread source */
		val &= ~DREF_NONSPREAD_SOURCE_MASK;
			val |= DREF_NONSPREAD_CK505_ENABLE;
			val |= DREF_NONSPREAD_SOURCE_ENABLE;

		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);

		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);

	BUG_ON(val != final);
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
static void lpt_init_pch_refclk(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	bool has_vga = false;
	bool is_sdv = false;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:

	mutex_lock(&dev_priv->dpio_lock);

	/* XXX: Rip out SDV support once Haswell ships for real. */
	if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
	tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
	intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
	tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
	intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);

	/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
	tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
	tmp |= SBI_DBUFF0_ENABLE;
	intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->dpio_lock);
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
static int ironlake_get_refclk(struct drm_crtc *crtc)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_encoder *edp_encoder = NULL;
	int num_connectors = 0;
	bool is_lvds = false;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
		case INTEL_OUTPUT_EDP:
			edp_encoder = encoder;

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
			      dev_priv->lvds_ssc_freq);
		return dev_priv->lvds_ssc_freq * 1000;
static void ironlake_set_pipeconf(struct drm_crtc *crtc,
				  struct drm_display_mode *adjusted_mode,
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	val = I915_READ(PIPECONF(pipe));

	val &= ~PIPECONF_BPC_MASK;
	switch (intel_crtc->config.pipe_bpp) {
		val |= PIPECONF_6BPC;
		val |= PIPECONF_8BPC;
		val |= PIPECONF_10BPC;
		val |= PIPECONF_12BPC;
		/* Case prevented by intel_choose_pipe_bpp_dither. */

	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	val &= ~PIPECONF_INTERLACE_MASK;
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config.limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;
		val &= ~PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 */
	if (intel_crtc->config.limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		if (intel_crtc->config.limited_color_range)
			postoff = (16 * (1 << 13) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config.limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
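/*
 * The limited-range coefficient above is straightforward arithmetic:
 * (235 - 16) / 255 ~= 0.859 scales full-range RGB into the 16..235 video
 * range, and the code evaluates it as (219 * 4096) / 255 = 3517 before
 * masking off the low bits with 0xff8. (The register encoding itself is
 * not described here; these numbers only show where the constant comes
 * from.)
 */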
static void haswell_set_pipeconf(struct drm_crtc *crtc,
				 struct drm_display_mode *adjusted_mode,
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;

	val = I915_READ(PIPECONF(cpu_transcoder));

	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	val &= ~PIPECONF_INTERLACE_MASK_HSW;
	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    struct drm_display_mode *adjusted_mode,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	const intel_limit_t *limit;
	bool ret, is_sdvo = false, is_tv = false, is_lvds = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			if (intel_encoder->needs_tv_clock)
		case INTEL_OUTPUT_TVOUT:

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE. The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		*has_reduced_clock = limit->find_pll(limit, crtc,
						     dev_priv->lvds_downclock,

	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(to_intel_crtc(crtc));
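/*
 * Worked example of the clock equation quoted above, with purely
 * illustrative divider values: refclk = 120000 kHz, m1 = 12, m2 = 9,
 * n = 3, p1 = 2, p2 = 10 gives
 * 120000 * (5 * (12 + 2) + (9 + 2)) / (3 + 2) / 2 / 10 = 97200 kHz.
 */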
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (temp & FDI_BC_BIFURCATION_SELECT)

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp |= FDI_BC_BIFURCATION_SELECT;
	DRM_DEBUG_KMS("enabling fdi C rx\n");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
		      intel_crtc->pipe, intel_crtc->fdi_lanes);
	if (intel_crtc->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
			      intel_crtc->pipe, intel_crtc->fdi_lanes);
		/* Clamp lanes to avoid programming the hw with bogus values. */
		intel_crtc->fdi_lanes = 4;

	if (INTEL_INFO(dev)->num_pipes == 2)

	switch (intel_crtc->pipe) {
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    intel_crtc->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
				      intel_crtc->pipe, intel_crtc->fdi_lanes);
			/* Clamp lanes to avoid programming the hw with bogus values. */
			intel_crtc->fdi_lanes = 2;

		if (intel_crtc->fdi_lanes > 2)
			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
			cpt_enable_fdi_bc_bifurcation(dev);

		if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
			if (intel_crtc->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
					      intel_crtc->pipe, intel_crtc->fdi_lanes);
				/* Clamp lanes to avoid programming the hw with bogus values. */
				intel_crtc->fdi_lanes = 2;

			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");

		cpt_enable_fdi_bc_bifurcation(dev);
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return bps / (link_bw * 8) + 1;
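/*
 * Illustrative use of the helper above (example numbers only): a
 * 148500 kHz target clock at 24 bpp gives bps = 148500 * 24 * 21 / 20 =
 * 3742200, and with a 270000 kHz FDI link that is
 * 3742200 / (270000 * 8) + 1 = 2 lanes.
 */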
void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
				  struct intel_link_m_n *m_n)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(TRANSDATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(TRANSDPLINK_M1(pipe), m_n->link_m);
	I915_WRITE(TRANSDPLINK_N1(pipe), m_n->link_n);
void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
				  struct intel_link_m_n *m_n)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config.cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		I915_WRITE(PIPE_GMCH_DATA_M(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n->link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n->link_n);
static void ironlake_fdi_set_m_n(struct drm_crtc *crtc)
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	struct intel_link_m_n m_n = {0};
	int target_clock, lane, link_bw;

	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	if (intel_crtc->config.pixel_target_clock)
		target_clock = intel_crtc->config.pixel_target_clock;
		target_clock = adjusted_mode->clock;

	lane = ironlake_get_lanes_required(target_clock, link_bw,
					   intel_crtc->config.pipe_bpp);

	intel_crtc->fdi_lanes = lane;

	if (intel_crtc->config.pixel_multiplier > 1)
		link_bw *= intel_crtc->config.pixel_multiplier;
	intel_link_compute_m_n(intel_crtc->config.pipe_bpp, lane, target_clock,

	intel_cpu_transcoder_set_m_n(intel_crtc, &m_n);
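/*
 * Sanity check on the link_bw arithmetic above, assuming the MHz()/KHz()
 * helpers expand to the usual factors (an assumption, not spelled out
 * here): a divider value of 27 (2.7 GHz) gives
 * link_bw = 27 * 100000 / 10 = 270000 kHz of octets per lane, the /10
 * accounting for the 8b/10b encoding described in the comment.
 */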
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      intel_clock_t *clock, u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	int factor, num_connectors = 0;
	bool is_lvds = false, is_sdvo = false, is_tv = false;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			if (intel_encoder->needs_tv_clock)
		case INTEL_OUTPUT_TVOUT:

	/* Enable autotuning of the PLL clock (if permissible) */
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->lvds_ssc_freq == 100) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
	} else if (is_sdvo && is_tv)

	if (clock->m < factor * clock->n)
	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))

		dpll |= DPLLB_MODE_LVDS;
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (intel_crtc->config.pixel_multiplier > 1) {
		dpll |= (intel_crtc->config.pixel_multiplier - 1)
			<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
		dpll |= DPLL_DVO_HIGH_SPEED;
	if (intel_crtc->config.has_dp_encoder &&
	    intel_crtc->config.has_pch_encoder)
		dpll |= DPLL_DVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (clock->p2) {
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;

	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
		/* XXX: just matching BIOS for now */
		/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
		dpll |= PLL_REF_INPUT_DREFCLK;
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_framebuffer *fb)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_encoder *encoder;
	bool dither, fdi_config_ok;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	intel_crtc->config.cpu_transcoder = pipe;

	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
				     &has_reduced_clock, &reduced_clock);
		DRM_ERROR("Couldn't find PLL settings for mode!\n");

	/* Compat-code for transition, will disappear. */
	if (!intel_crtc->config.clock_set) {
		intel_crtc->config.dpll.n = clock.n;
		intel_crtc->config.dpll.m1 = clock.m1;
		intel_crtc->config.dpll.m2 = clock.m2;
		intel_crtc->config.dpll.p1 = clock.p1;
		intel_crtc->config.dpll.p2 = clock.p2;

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	/* determine panel color depth */
	dither = intel_crtc->config.dither;
	if (is_lvds && dev_priv->lvds_dither)

	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
	if (has_reduced_clock)
		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
			reduced_clock.m2;

	dpll = ironlake_compute_dpll(intel_crtc, &clock, &fp, &reduced_clock,
				     has_reduced_clock ? &fp2 : NULL);

	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
	drm_mode_debug_printmodeline(mode);

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (intel_crtc->config.has_pch_encoder) {
		struct intel_pch_pll *pll;

		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
		intel_put_pch_pll(intel_crtc);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (intel_crtc->pch_pll) {
		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);

		/* Wait for the clocks to stabilize. */
		POSTING_READ(intel_crtc->pch_pll->pll_reg);

		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);

	intel_crtc->lowfreq_avail = false;
	if (intel_crtc->pch_pll) {
		if (is_lvds && has_reduced_clock && i915_powersave) {
			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
			intel_crtc->lowfreq_avail = true;
			I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);

	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);

	/* Note, this also computes intel_crtc->fdi_lanes which is used below in
	 * ironlake_check_fdi_lanes. */
	intel_crtc->fdi_lanes = 0;
	if (intel_crtc->config.has_pch_encoder)
		ironlake_fdi_set_m_n(crtc);

	fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);

	ironlake_set_pipeconf(crtc, adjusted_mode, dither);

	intel_wait_for_vblank(dev, pipe);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	intel_update_watermarks(dev);

	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);

	return fdi_config_ok ? ret : -EINVAL;
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_config *pipe_config)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))

	if (I915_READ(TRANSCONF(crtc->pipe)) & TRANS_ENABLE)
		pipe_config->has_pch_encoder = true;
static void haswell_modeset_global_resources(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool enable = false;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
		if (crtc->pipe != PIPE_A && crtc->base.enabled)
		/* XXX: Should check for edp transcoder here, but thanks to init
		 * sequence that's not yet available. Just in case desktop eDP
		 * on PORT D is possible on haswell, too. */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
		if (encoder->type != INTEL_OUTPUT_EDP &&
		    encoder->connectors_active)

	/* Even the eDP panel fitter is outside the always-on well. */
	if (dev_priv->pch_pf_size)

	intel_set_power_well(dev, enable);
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int num_connectors = 0;
	bool is_cpu_edp = false;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_EDP:
			if (!intel_encoder_is_pch_edp(&encoder->base))

		intel_crtc->config.cpu_transcoder = TRANSCODER_EDP;
		intel_crtc->config.cpu_transcoder = pipe;

	/* We are not sure yet this won't happen. */
	WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
	     INTEL_PCH_TYPE(dev));

	WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
	     num_connectors, pipe_name(pipe));

	WARN_ON(I915_READ(PIPECONF(intel_crtc->config.cpu_transcoder)) &
		(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));

	WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);

	if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	/* determine panel color depth */
	dither = intel_crtc->config.dither;

	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
	drm_mode_debug_printmodeline(mode);

	if (intel_crtc->config.has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_crtc->lowfreq_avail = false;

	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);

	if (intel_crtc->config.has_pch_encoder)
		ironlake_fdi_set_m_n(crtc);

	haswell_set_pipeconf(crtc, adjusted_mode, dither);

	intel_set_pipe_csc(crtc);

	/* Set up the display plane register */
	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
	POSTING_READ(DSPCNTR(plane));

	ret = intel_pipe_set_base(crtc, x, y, fb);

	intel_update_watermarks(dev);

	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_config *pipe_config)
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	tmp = I915_READ(PIPECONF(crtc->config.cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))

	/*
	 * Haswell has only FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(crtc->pipe));
	if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
	    I915_READ(TRANSCONF(PIPE_A)) & TRANS_ENABLE)
		pipe_config->has_pch_encoder = true;
static int intel_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_framebuffer *fb)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder_helper_funcs *encoder_funcs;
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config.adjusted_mode;
	struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
	int pipe = intel_crtc->pipe;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);

	drm_vblank_post_modeset(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base),
			      mode->base.id, mode->name);
		if (encoder->mode_set) {
			encoder->mode_set(encoder);
			encoder_funcs = encoder->base.helper_private;
			encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;

	i = I915_READ(reg_eldv);

	i = I915_READ(reg_elda);
	I915_WRITE(reg_elda, i);

	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;

	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
		eldv = G4X_ELDV_DEVCTG;

	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))

	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	len = min_t(uint8_t, eld[2], len);
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	i = I915_READ(G4X_AUD_CNTL_ST);
	I915_WRITE(G4X_AUD_CNTL_ST, i);
static void haswell_write_eld(struct drm_connector *connector,
			      struct drm_crtc *crtc)
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = to_intel_crtc(crtc)->pipe;

	int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
	int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
	int aud_config = HSW_AUD_CFG(pipe);
	int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;

	DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");

	/* Audio output enable */
	DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
	tmp = I915_READ(aud_cntrl_st2);
	tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);

	/* Wait for 1 vertical blank */
	intel_wait_for_vblank(dev, pipe);

	/* Set ELD valid state */
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
	tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
	I915_WRITE(aud_cntrl_st2, tmp);
	tmp = I915_READ(aud_cntrl_st2);
	DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);

	/* Enable HDMI mode */
	tmp = I915_READ(aud_config);
	DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
	/* clear N_programming_enable and N_value_index */
	tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
	I915_WRITE(aud_config, tmp);

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	eldv = AUDIO_ELD_VALID_A << (pipe * 4);
	intel_crtc->eld_vld = true;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
		I915_WRITE(aud_config, 0);

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,

	i = I915_READ(aud_cntrl_st2);
	I915_WRITE(aud_cntrl_st2, i);

	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);
	i = (i >> 29) & DIP_PORT_SEL_MASK;	/* DIP_Port_Select, 0x1 = PortB */
	DRM_DEBUG_DRIVER("port num:%d\n", i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	i = I915_READ(aud_cntrl_st2);
	I915_WRITE(aud_cntrl_st2, i);
static void ironlake_write_eld(struct drm_connector *connector,
			       struct drm_crtc *crtc)
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	int pipe = to_intel_crtc(crtc)->pipe;

	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
		aud_config = IBX_AUD_CFG(pipe);
		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
		aud_config = CPT_AUD_CFG(pipe);
		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;

	DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & DIP_PORT_SEL_MASK;	/* DIP_Port_Select, 0x1 = PortB */
		DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
		DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
		I915_WRITE(aud_config, 0);

	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,

	i = I915_READ(aud_cntrl_st2);
	I915_WRITE(aud_cntrl_st2, i);

	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	len = min_t(uint8_t, eld[2], 21);	/* 84 bytes of hw ELD buffer */
	DRM_DEBUG_DRIVER("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	i = I915_READ(aud_cntrl_st2);
	I915_WRITE(aud_cntrl_st2, i);
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	connector = drm_select_eld(encoder, mode);

	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 drm_get_connector_name(connector),
			 connector->encoder->base.id,
			 drm_get_encoder_name(connector->encoder));

	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc);
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int palreg = PALETTE(intel_crtc->pipe);

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled || !intel_crtc->active)

	/* use legacy palette for Ironlake */
	if (HAS_PCH_SPLIT(dev))
		palreg = LGC_PALETTE(intel_crtc->pipe);

	for (i = 0; i < 256; i++) {
		I915_WRITE(palreg + 4 * i,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;

	if (intel_crtc->cursor_visible == visible)

		cntl = I915_READ(_CURACNTR);

		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		if (IS_HASWELL(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE_IVB(pipe), base);
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base = 0, pos = 0;
	bool visible;

	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	visible = base != 0;
	if (!visible && !intel_crtc->cursor_visible)
		return;

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}
}
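/*
 * Legacy cursor_set entry point: looks up the GEM object for the new cursor
 * image, pins it for display (or attaches a physical object on hardware that
 * needs a physically contiguous cursor), releases the previous cursor bo and
 * finally reprograms the cursor registers via intel_crtc_update_cursor().
 */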
6392 static int intel_crtc_cursor_set(struct drm_crtc
*crtc
,
6393 struct drm_file
*file
,
6395 uint32_t width
, uint32_t height
)
6397 struct drm_device
*dev
= crtc
->dev
;
6398 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6399 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6400 struct drm_i915_gem_object
*obj
;
	/* if we want to turn off the cursor ignore width and height */
	if (!handle) {
		DRM_DEBUG_KMS("cursor off\n");
		addr = 0;
		obj = NULL;
		mutex_lock(&dev->struct_mutex);
		goto finish;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		DRM_ERROR("we currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (&obj->base == NULL)
		return -ENOENT;

	if (obj->base.size < width * height * 4) {
		DRM_ERROR("buffer is too small\n");
		ret = -ENOMEM;
		goto fail;
	}
6429 /* we only need to pin inside GTT if cursor is non-phy */
6430 mutex_lock(&dev
->struct_mutex
);
6431 if (!dev_priv
->info
->cursor_needs_physical
) {
6434 if (obj
->tiling_mode
) {
6435 DRM_ERROR("cursor cannot be tiled\n");
		/* Note that the w/a also requires 2 PTE of padding following
		 * the bo. We currently fill all unused PTE with the shadow
		 * page and so we should always have valid PTE following the
		 * cursor preventing the VT-d warning.
		 */
		alignment = 0;
		if (need_vtd_wa(dev))
			alignment = 64*1024;

		ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
		if (ret) {
			DRM_ERROR("failed to move cursor bo into the GTT\n");
			goto fail_locked;
		}
6455 ret
= i915_gem_object_put_fence(obj
);
6457 DRM_ERROR("failed to release fence for cursor");
6461 addr
= obj
->gtt_offset
;
6463 int align
= IS_I830(dev
) ? 16 * 1024 : 256;
6464 ret
= i915_gem_attach_phys_object(dev
, obj
,
6465 (intel_crtc
->pipe
== 0) ? I915_GEM_PHYS_CURSOR_0
: I915_GEM_PHYS_CURSOR_1
,
6468 DRM_ERROR("failed to attach phys object\n");
6471 addr
= obj
->phys_obj
->handle
->busaddr
;
6475 I915_WRITE(CURSIZE
, (height
<< 12) | width
);
6478 if (intel_crtc
->cursor_bo
) {
6479 if (dev_priv
->info
->cursor_needs_physical
) {
6480 if (intel_crtc
->cursor_bo
!= obj
)
6481 i915_gem_detach_phys_object(dev
, intel_crtc
->cursor_bo
);
6483 i915_gem_object_unpin(intel_crtc
->cursor_bo
);
6484 drm_gem_object_unreference(&intel_crtc
->cursor_bo
->base
);
6487 mutex_unlock(&dev
->struct_mutex
);
6489 intel_crtc
->cursor_addr
= addr
;
6490 intel_crtc
->cursor_bo
= obj
;
6491 intel_crtc
->cursor_width
= width
;
6492 intel_crtc
->cursor_height
= height
;
6494 intel_crtc_update_cursor(crtc
, true);
6498 i915_gem_object_unpin(obj
);
6500 mutex_unlock(&dev
->struct_mutex
);
6502 drm_gem_object_unreference_unlocked(&obj
->base
);
6506 static int intel_crtc_cursor_move(struct drm_crtc
*crtc
, int x
, int y
)
6508 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6510 intel_crtc
->cursor_x
= x
;
6511 intel_crtc
->cursor_y
= y
;
6513 intel_crtc_update_cursor(crtc
, true);
/** Sets the color ramps on behalf of RandR */
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			     u16 blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->lut_r[regno] = red >> 8;
	intel_crtc->lut_g[regno] = green >> 8;
	intel_crtc->lut_b[regno] = blue >> 8;
}

void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			     u16 *blue, int regno)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	*red = intel_crtc->lut_r[regno] << 8;
	*green = intel_crtc->lut_g[regno] << 8;
	*blue = intel_crtc->lut_b[regno] << 8;
}

static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}
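/*
 * The lut_r/g/b arrays above hold the legacy 256-entry, 8-bit-per-channel
 * gamma ramp; intel_crtc_load_lut() writes it out to the pipe palette
 * registers (see the palette write loop earlier in this file).
 */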
/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
6560 static struct drm_framebuffer
*
6561 intel_framebuffer_create(struct drm_device
*dev
,
6562 struct drm_mode_fb_cmd2
*mode_cmd
,
6563 struct drm_i915_gem_object
*obj
)
6565 struct intel_framebuffer
*intel_fb
;
6568 intel_fb
= kzalloc(sizeof(*intel_fb
), GFP_KERNEL
);
6570 drm_gem_object_unreference_unlocked(&obj
->base
);
6571 return ERR_PTR(-ENOMEM
);
6574 ret
= intel_framebuffer_init(dev
, intel_fb
, mode_cmd
, obj
);
6576 drm_gem_object_unreference_unlocked(&obj
->base
);
6578 return ERR_PTR(ret
);
6581 return &intel_fb
->base
;
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
}
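/*
 * Display surfaces want a pitch that is a multiple of 64 bytes and an
 * allocation rounded up to a full page, which is what the two helpers
 * above compute for the temporary load-detect framebuffer.
 */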
6598 static struct drm_framebuffer
*
6599 intel_framebuffer_create_for_mode(struct drm_device
*dev
,
6600 struct drm_display_mode
*mode
,
6603 struct drm_i915_gem_object
*obj
;
6604 struct drm_mode_fb_cmd2 mode_cmd
= { 0 };
6606 obj
= i915_gem_alloc_object(dev
,
6607 intel_framebuffer_size_for_mode(mode
, bpp
));
6609 return ERR_PTR(-ENOMEM
);
6611 mode_cmd
.width
= mode
->hdisplay
;
6612 mode_cmd
.height
= mode
->vdisplay
;
6613 mode_cmd
.pitches
[0] = intel_framebuffer_pitch_for_width(mode_cmd
.width
,
6615 mode_cmd
.pixel_format
= drm_mode_legacy_fb_format(bpp
, depth
);
6617 return intel_framebuffer_create(dev
, &mode_cmd
, obj
);
6620 static struct drm_framebuffer
*
6621 mode_fits_in_fbdev(struct drm_device
*dev
,
6622 struct drm_display_mode
*mode
)
6624 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6625 struct drm_i915_gem_object
*obj
;
6626 struct drm_framebuffer
*fb
;
6628 if (dev_priv
->fbdev
== NULL
)
6631 obj
= dev_priv
->fbdev
->ifb
.obj
;
6635 fb
= &dev_priv
->fbdev
->ifb
.base
;
6636 if (fb
->pitches
[0] < intel_framebuffer_pitch_for_width(mode
->hdisplay
,
6637 fb
->bits_per_pixel
))
6640 if (obj
->base
.size
< mode
->vdisplay
* fb
->pitches
[0])
6646 bool intel_get_load_detect_pipe(struct drm_connector
*connector
,
6647 struct drm_display_mode
*mode
,
6648 struct intel_load_detect_pipe
*old
)
6650 struct intel_crtc
*intel_crtc
;
6651 struct intel_encoder
*intel_encoder
=
6652 intel_attached_encoder(connector
);
6653 struct drm_crtc
*possible_crtc
;
6654 struct drm_encoder
*encoder
= &intel_encoder
->base
;
6655 struct drm_crtc
*crtc
= NULL
;
6656 struct drm_device
*dev
= encoder
->dev
;
6657 struct drm_framebuffer
*fb
;
6660 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6661 connector
->base
.id
, drm_get_connector_name(connector
),
6662 encoder
->base
.id
, drm_get_encoder_name(encoder
));
6665 * Algorithm gets a little messy:
6667 * - if the connector already has an assigned crtc, use it (but make
6668 * sure it's on first)
6670 * - try to find the first unused crtc that can drive this connector,
6671 * and use that if we find one
6674 /* See if we already have a CRTC for this connector */
6675 if (encoder
->crtc
) {
6676 crtc
= encoder
->crtc
;
6678 mutex_lock(&crtc
->mutex
);
6680 old
->dpms_mode
= connector
->dpms
;
6681 old
->load_detect_temp
= false;
6683 /* Make sure the crtc and connector are running */
6684 if (connector
->dpms
!= DRM_MODE_DPMS_ON
)
6685 connector
->funcs
->dpms(connector
, DRM_MODE_DPMS_ON
);
6690 /* Find an unused one (if possible) */
6691 list_for_each_entry(possible_crtc
, &dev
->mode_config
.crtc_list
, head
) {
6693 if (!(encoder
->possible_crtcs
& (1 << i
)))
6695 if (!possible_crtc
->enabled
) {
6696 crtc
= possible_crtc
;
6702 * If we didn't find an unused CRTC, don't use any.
6705 DRM_DEBUG_KMS("no pipe available for load-detect\n");
6709 mutex_lock(&crtc
->mutex
);
6710 intel_encoder
->new_crtc
= to_intel_crtc(crtc
);
6711 to_intel_connector(connector
)->new_encoder
= intel_encoder
;
6713 intel_crtc
= to_intel_crtc(crtc
);
6714 old
->dpms_mode
= connector
->dpms
;
6715 old
->load_detect_temp
= true;
6716 old
->release_fb
= NULL
;
6719 mode
= &load_detect_mode
;
	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We cannot rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
6728 fb
= mode_fits_in_fbdev(dev
, mode
);
6730 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
6731 fb
= intel_framebuffer_create_for_mode(dev
, mode
, 24, 32);
6732 old
->release_fb
= fb
;
6734 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
6736 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
6737 mutex_unlock(&crtc
->mutex
);
6741 if (intel_set_mode(crtc
, mode
, 0, 0, fb
)) {
6742 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
6743 if (old
->release_fb
)
6744 old
->release_fb
->funcs
->destroy(old
->release_fb
);
6745 mutex_unlock(&crtc
->mutex
);
6749 /* let the connector get through one full cycle before testing */
6750 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
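/*
 * Counterpart to intel_get_load_detect_pipe(): tears down the temporary
 * encoder/crtc routing, drops any framebuffer created for load detection
 * and restores the original dpms state of the connector.
 */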
6754 void intel_release_load_detect_pipe(struct drm_connector
*connector
,
6755 struct intel_load_detect_pipe
*old
)
6757 struct intel_encoder
*intel_encoder
=
6758 intel_attached_encoder(connector
);
6759 struct drm_encoder
*encoder
= &intel_encoder
->base
;
6760 struct drm_crtc
*crtc
= encoder
->crtc
;
6762 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6763 connector
->base
.id
, drm_get_connector_name(connector
),
6764 encoder
->base
.id
, drm_get_encoder_name(encoder
));
6766 if (old
->load_detect_temp
) {
6767 to_intel_connector(connector
)->new_encoder
= NULL
;
6768 intel_encoder
->new_crtc
= NULL
;
6769 intel_set_mode(crtc
, NULL
, 0, 0, NULL
);
6771 if (old
->release_fb
) {
6772 drm_framebuffer_unregister_private(old
->release_fb
);
6773 drm_framebuffer_unreference(old
->release_fb
);
6776 mutex_unlock(&crtc
->mutex
);
6780 /* Switch crtc and encoder back off if necessary */
6781 if (old
->dpms_mode
!= DRM_MODE_DPMS_ON
)
6782 connector
->funcs
->dpms(connector
, old
->dpms_mode
);
6784 mutex_unlock(&crtc
->mutex
);
6787 /* Returns the clock of the currently programmed mode of the given pipe. */
6788 static int intel_crtc_clock_get(struct drm_device
*dev
, struct drm_crtc
*crtc
)
6790 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6791 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6792 int pipe
= intel_crtc
->pipe
;
6793 u32 dpll
= I915_READ(DPLL(pipe
));
6795 intel_clock_t clock
;
6797 if ((dpll
& DISPLAY_RATE_SELECT_FPA1
) == 0)
6798 fp
= I915_READ(FP0(pipe
));
6800 fp
= I915_READ(FP1(pipe
));
6802 clock
.m1
= (fp
& FP_M1_DIV_MASK
) >> FP_M1_DIV_SHIFT
;
6803 if (IS_PINEVIEW(dev
)) {
6804 clock
.n
= ffs((fp
& FP_N_PINEVIEW_DIV_MASK
) >> FP_N_DIV_SHIFT
) - 1;
6805 clock
.m2
= (fp
& FP_M2_PINEVIEW_DIV_MASK
) >> FP_M2_DIV_SHIFT
;
6807 clock
.n
= (fp
& FP_N_DIV_MASK
) >> FP_N_DIV_SHIFT
;
6808 clock
.m2
= (fp
& FP_M2_DIV_MASK
) >> FP_M2_DIV_SHIFT
;
6811 if (!IS_GEN2(dev
)) {
6812 if (IS_PINEVIEW(dev
))
6813 clock
.p1
= ffs((dpll
& DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW
) >>
6814 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW
);
6816 clock
.p1
= ffs((dpll
& DPLL_FPA01_P1_POST_DIV_MASK
) >>
6817 DPLL_FPA01_P1_POST_DIV_SHIFT
);
6819 switch (dpll
& DPLL_MODE_MASK
) {
6820 case DPLLB_MODE_DAC_SERIAL
:
6821 clock
.p2
= dpll
& DPLL_DAC_SERIAL_P2_CLOCK_DIV_5
?
6824 case DPLLB_MODE_LVDS
:
6825 clock
.p2
= dpll
& DPLLB_LVDS_P2_CLOCK_DIV_7
?
6829 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
6830 "mode\n", (int)(dpll
& DPLL_MODE_MASK
));
6834 /* XXX: Handle the 100Mhz refclk */
6835 intel_clock(dev
, 96000, &clock
);
6837 bool is_lvds
= (pipe
== 1) && (I915_READ(LVDS
) & LVDS_PORT_EN
);
6840 clock
.p1
= ffs((dpll
& DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS
) >>
6841 DPLL_FPA01_P1_POST_DIV_SHIFT
);
6844 if ((dpll
& PLL_REF_INPUT_MASK
) ==
6845 PLLB_REF_INPUT_SPREADSPECTRUMIN
) {
6846 /* XXX: might not be 66MHz */
6847 intel_clock(dev
, 66000, &clock
);
6849 intel_clock(dev
, 48000, &clock
);
6851 if (dpll
& PLL_P1_DIVIDE_BY_TWO
)
6854 clock
.p1
= ((dpll
& DPLL_FPA01_P1_POST_DIV_MASK_I830
) >>
6855 DPLL_FPA01_P1_POST_DIV_SHIFT
) + 2;
6857 if (dpll
& PLL_P2_DIVIDE_BY_4
)
6862 intel_clock(dev
, 48000, &clock
);
	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */
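	/* The dot clock is recovered purely from the FP/DPLL register fields
	 * together with an assumed reference clock (96 MHz on the gen3+ path,
	 * 66/48 MHz on the gen2 paths below); intel_clock() does the m/n/p
	 * arithmetic.
	 */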
6874 /** Returns the currently programmed mode of the given pipe. */
6875 struct drm_display_mode
*intel_crtc_mode_get(struct drm_device
*dev
,
6876 struct drm_crtc
*crtc
)
6878 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6879 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6880 enum transcoder cpu_transcoder
= intel_crtc
->config
.cpu_transcoder
;
6881 struct drm_display_mode
*mode
;
6882 int htot
= I915_READ(HTOTAL(cpu_transcoder
));
6883 int hsync
= I915_READ(HSYNC(cpu_transcoder
));
6884 int vtot
= I915_READ(VTOTAL(cpu_transcoder
));
6885 int vsync
= I915_READ(VSYNC(cpu_transcoder
));
6887 mode
= kzalloc(sizeof(*mode
), GFP_KERNEL
);
6891 mode
->clock
= intel_crtc_clock_get(dev
, crtc
);
6892 mode
->hdisplay
= (htot
& 0xffff) + 1;
6893 mode
->htotal
= ((htot
& 0xffff0000) >> 16) + 1;
6894 mode
->hsync_start
= (hsync
& 0xffff) + 1;
6895 mode
->hsync_end
= ((hsync
& 0xffff0000) >> 16) + 1;
6896 mode
->vdisplay
= (vtot
& 0xffff) + 1;
6897 mode
->vtotal
= ((vtot
& 0xffff0000) >> 16) + 1;
6898 mode
->vsync_start
= (vsync
& 0xffff) + 1;
6899 mode
->vsync_end
= ((vsync
& 0xffff0000) >> 16) + 1;
6901 drm_mode_set_name(mode
);
6906 static void intel_increase_pllclock(struct drm_crtc
*crtc
)
6908 struct drm_device
*dev
= crtc
->dev
;
6909 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
6910 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6911 int pipe
= intel_crtc
->pipe
;
6912 int dpll_reg
= DPLL(pipe
);
6915 if (HAS_PCH_SPLIT(dev
))
6918 if (!dev_priv
->lvds_downclock_avail
)
6921 dpll
= I915_READ(dpll_reg
);
6922 if (!HAS_PIPE_CXSR(dev
) && (dpll
& DISPLAY_RATE_SELECT_FPA1
)) {
6923 DRM_DEBUG_DRIVER("upclocking LVDS\n");
6925 assert_panel_unlocked(dev_priv
, pipe
);
6927 dpll
&= ~DISPLAY_RATE_SELECT_FPA1
;
6928 I915_WRITE(dpll_reg
, dpll
);
6929 intel_wait_for_vblank(dev
, pipe
);
6931 dpll
= I915_READ(dpll_reg
);
6932 if (dpll
& DISPLAY_RATE_SELECT_FPA1
)
6933 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
6937 static void intel_decrease_pllclock(struct drm_crtc
*crtc
)
6939 struct drm_device
*dev
= crtc
->dev
;
6940 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
6941 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
6943 if (HAS_PCH_SPLIT(dev
))
6946 if (!dev_priv
->lvds_downclock_avail
)
6950 * Since this is called by a timer, we should never get here in
6953 if (!HAS_PIPE_CXSR(dev
) && intel_crtc
->lowfreq_avail
) {
6954 int pipe
= intel_crtc
->pipe
;
6955 int dpll_reg
= DPLL(pipe
);
6958 DRM_DEBUG_DRIVER("downclocking LVDS\n");
6960 assert_panel_unlocked(dev_priv
, pipe
);
6962 dpll
= I915_READ(dpll_reg
);
6963 dpll
|= DISPLAY_RATE_SELECT_FPA1
;
6964 I915_WRITE(dpll_reg
, dpll
);
6965 intel_wait_for_vblank(dev
, pipe
);
6966 dpll
= I915_READ(dpll_reg
);
6967 if (!(dpll
& DISPLAY_RATE_SELECT_FPA1
))
6968 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
void intel_mark_busy(struct drm_device *dev)
{
	i915_update_gfx_val(dev->dev_private);
}
6977 void intel_mark_idle(struct drm_device
*dev
)
6979 struct drm_crtc
*crtc
;
6981 if (!i915_powersave
)
6984 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
6988 intel_decrease_pllclock(crtc
);
6992 void intel_mark_fb_busy(struct drm_i915_gem_object
*obj
)
6994 struct drm_device
*dev
= obj
->base
.dev
;
6995 struct drm_crtc
*crtc
;
6997 if (!i915_powersave
)
7000 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
, head
) {
7004 if (to_intel_framebuffer(crtc
->fb
)->obj
== obj
)
7005 intel_increase_pllclock(crtc
);
7009 static void intel_crtc_destroy(struct drm_crtc
*crtc
)
7011 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
7012 struct drm_device
*dev
= crtc
->dev
;
7013 struct intel_unpin_work
*work
;
7015 lockmgr(&dev
->event_lock
, LK_EXCLUSIVE
);
7016 work
= intel_crtc
->unpin_work
;
7017 intel_crtc
->unpin_work
= NULL
;
7018 lockmgr(&dev
->event_lock
, LK_RELEASE
);
7021 cancel_work_sync(&work
->work
);
7025 drm_crtc_cleanup(crtc
);
7027 drm_free(intel_crtc
, M_DRM
);
7030 static void intel_unpin_work_fn(struct work_struct
*__work
)
7032 struct intel_unpin_work
*work
=
7033 container_of(__work
, struct intel_unpin_work
, work
);
7034 struct drm_device
*dev
= work
->crtc
->dev
;
7036 mutex_lock(&dev
->struct_mutex
);
7037 intel_unpin_fb_obj(work
->old_fb_obj
);
7038 drm_gem_object_unreference(&work
->pending_flip_obj
->base
);
7039 drm_gem_object_unreference(&work
->old_fb_obj
->base
);
7041 intel_update_fbc(dev
);
7042 mutex_unlock(&dev
->struct_mutex
);
7044 BUG_ON(atomic_read(&to_intel_crtc(work
->crtc
)->unpin_work_count
) == 0);
7045 atomic_dec(&to_intel_crtc(work
->crtc
)->unpin_work_count
);
7047 drm_free(work
, M_DRM
);
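/*
 * Page-flip completion: called from the vblank/flip-done interrupt path.
 * It detaches the crtc's unpin_work, sends the vblank event to userspace
 * and queues intel_unpin_work_fn() to unpin the old framebuffer outside
 * of interrupt context.
 */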
7050 static void do_intel_finish_page_flip(struct drm_device
*dev
,
7051 struct drm_crtc
*crtc
)
7053 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
7054 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
7055 struct intel_unpin_work
*work
;
7057 /* Ignore early vblank irqs */
7058 if (intel_crtc
== NULL
)
7061 lockmgr(&dev
->event_lock
, LK_EXCLUSIVE
);
7062 work
= intel_crtc
->unpin_work
;
7064 /* Ensure we don't miss a work->pending update ... */
7067 if (work
== NULL
|| atomic_read(&work
->pending
) < INTEL_FLIP_COMPLETE
) {
7068 lockmgr(&dev
->event_lock
, LK_RELEASE
);
7072 /* and that the unpin work is consistent wrt ->pending. */
7075 intel_crtc
->unpin_work
= NULL
;
7078 drm_send_vblank_event(dev
, intel_crtc
->pipe
, work
->event
);
7080 drm_vblank_put(dev
, intel_crtc
->pipe
);
7082 lockmgr(&dev
->event_lock
, LK_RELEASE
);
7084 wake_up_all(&dev_priv
->pending_flip_queue
);
7086 queue_work(dev_priv
->wq
, &work
->work
);
7089 void intel_finish_page_flip(struct drm_device
*dev
, int pipe
)
7091 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
7092 struct drm_crtc
*crtc
= dev_priv
->pipe_to_crtc_mapping
[pipe
];
7094 do_intel_finish_page_flip(dev
, crtc
);
7097 void intel_finish_page_flip_plane(struct drm_device
*dev
, int plane
)
7099 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
7100 struct drm_crtc
*crtc
= dev_priv
->plane_to_crtc_mapping
[plane
];
7102 do_intel_finish_page_flip(dev
, crtc
);
7105 void intel_prepare_page_flip(struct drm_device
*dev
, int plane
)
7107 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
7108 struct intel_crtc
*intel_crtc
=
7109 to_intel_crtc(dev_priv
->plane_to_crtc_mapping
[plane
]);
7111 /* NB: An MMIO update of the plane base pointer will also
7112 * generate a page-flip completion irq, i.e. every modeset
7113 * is also accompanied by a spurious intel_prepare_page_flip().
7115 lockmgr(&dev
->event_lock
, LK_EXCLUSIVE
);
7116 if (intel_crtc
->unpin_work
)
7117 atomic_inc_not_zero(&intel_crtc
->unpin_work
->pending
);
7118 lockmgr(&dev
->event_lock
, LK_RELEASE
);
7121 inline static void intel_mark_page_flip_active(struct intel_crtc
*intel_crtc
)
7123 /* Ensure that the work item is consistent when activating it ... */
7125 atomic_set(&intel_crtc
->unpin_work
->pending
, INTEL_FLIP_PENDING
);
7126 /* and that it is marked active as soon as the irq could fire. */
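/*
 * The per-generation queue_flip() implementations below emit MI_DISPLAY_FLIP
 * (plus a plane-flip wait on gen2/3) into a ring so the flip is ordered
 * against rendering; gen7 uses the blit ring, see the comment further down.
 */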
7130 static int intel_gen2_queue_flip(struct drm_device
*dev
,
7131 struct drm_crtc
*crtc
,
7132 struct drm_framebuffer
*fb
,
7133 struct drm_i915_gem_object
*obj
)
7135 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
7136 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
7138 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[RCS
];
7141 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
7145 ret
= intel_ring_begin(ring
, 6);
7149 /* Can't queue multiple flips, so wait for the previous
7150 * one to finish before executing the next.
7152 if (intel_crtc
->plane
)
7153 flip_mask
= MI_WAIT_FOR_PLANE_B_FLIP
;
7155 flip_mask
= MI_WAIT_FOR_PLANE_A_FLIP
;
7156 intel_ring_emit(ring
, MI_WAIT_FOR_EVENT
| flip_mask
);
7157 intel_ring_emit(ring
, MI_NOOP
);
7158 intel_ring_emit(ring
, MI_DISPLAY_FLIP
|
7159 MI_DISPLAY_FLIP_PLANE(intel_crtc
->plane
));
7160 intel_ring_emit(ring
, fb
->pitches
[0]);
7161 intel_ring_emit(ring
, obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
);
7162 intel_ring_emit(ring
, 0); /* aux display base address, unused */
7164 intel_mark_page_flip_active(intel_crtc
);
7165 intel_ring_advance(ring
);
7169 intel_unpin_fb_obj(obj
);
7174 static int intel_gen3_queue_flip(struct drm_device
*dev
,
7175 struct drm_crtc
*crtc
,
7176 struct drm_framebuffer
*fb
,
7177 struct drm_i915_gem_object
*obj
)
7179 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
7180 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
7182 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[RCS
];
7185 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
7189 ret
= intel_ring_begin(ring
, 6);
7193 if (intel_crtc
->plane
)
7194 flip_mask
= MI_WAIT_FOR_PLANE_B_FLIP
;
7196 flip_mask
= MI_WAIT_FOR_PLANE_A_FLIP
;
7197 intel_ring_emit(ring
, MI_WAIT_FOR_EVENT
| flip_mask
);
7198 intel_ring_emit(ring
, MI_NOOP
);
7199 intel_ring_emit(ring
, MI_DISPLAY_FLIP_I915
|
7200 MI_DISPLAY_FLIP_PLANE(intel_crtc
->plane
));
7201 intel_ring_emit(ring
, fb
->pitches
[0]);
7202 intel_ring_emit(ring
, obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
);
7203 intel_ring_emit(ring
, MI_NOOP
);
7205 intel_mark_page_flip_active(intel_crtc
);
7206 intel_ring_advance(ring
);
7210 intel_unpin_fb_obj(obj
);
7215 static int intel_gen4_queue_flip(struct drm_device
*dev
,
7216 struct drm_crtc
*crtc
,
7217 struct drm_framebuffer
*fb
,
7218 struct drm_i915_gem_object
*obj
)
7220 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
7221 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
7222 uint32_t pf
, pipesrc
;
7223 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[RCS
];
7226 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
7230 ret
= intel_ring_begin(ring
, 4);
7234 /* i965+ uses the linear or tiled offsets from the
7235 * Display Registers (which do not change across a page-flip)
7236 * so we need only reprogram the base address.
7238 intel_ring_emit(ring
, MI_DISPLAY_FLIP
|
7239 MI_DISPLAY_FLIP_PLANE(intel_crtc
->plane
));
7240 intel_ring_emit(ring
, fb
->pitches
[0]);
7241 intel_ring_emit(ring
,
7242 (obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
) |
	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
7250 pipesrc
= I915_READ(PIPESRC(intel_crtc
->pipe
)) & 0x0fff0fff;
7251 intel_ring_emit(ring
, pf
| pipesrc
);
7253 intel_mark_page_flip_active(intel_crtc
);
7254 intel_ring_advance(ring
);
7258 intel_unpin_fb_obj(obj
);
7263 static int intel_gen6_queue_flip(struct drm_device
*dev
,
7264 struct drm_crtc
*crtc
,
7265 struct drm_framebuffer
*fb
,
7266 struct drm_i915_gem_object
*obj
)
7268 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
7269 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
7270 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[RCS
];
7271 uint32_t pf
, pipesrc
;
7274 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
7278 ret
= intel_ring_begin(ring
, 4);
7282 intel_ring_emit(ring
, MI_DISPLAY_FLIP
|
7283 MI_DISPLAY_FLIP_PLANE(intel_crtc
->plane
));
7284 intel_ring_emit(ring
, fb
->pitches
[0] | obj
->tiling_mode
);
7285 intel_ring_emit(ring
, obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
);
	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
7294 pipesrc
= I915_READ(PIPESRC(intel_crtc
->pipe
)) & 0x0fff0fff;
7295 intel_ring_emit(ring
, pf
| pipesrc
);
7297 intel_mark_page_flip_active(intel_crtc
);
7298 intel_ring_advance(ring
);
7302 intel_unpin_fb_obj(obj
);
/*
 * On gen7 we currently use the blit ring because (in early silicon at least)
 * the render ring doesn't give us interrupts for page flip completion, which
 * means clients will hang after the first flip is queued. Fortunately the
 * blit ring generates interrupts properly, so use it instead.
 */
7313 static int intel_gen7_queue_flip(struct drm_device
*dev
,
7314 struct drm_crtc
*crtc
,
7315 struct drm_framebuffer
*fb
,
7316 struct drm_i915_gem_object
*obj
)
7318 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
7319 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
7320 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[BCS
];
7321 uint32_t plane_bit
= 0;
7324 ret
= intel_pin_and_fence_fb_obj(dev
, obj
, ring
);
7328 switch(intel_crtc
->plane
) {
7330 plane_bit
= MI_DISPLAY_FLIP_IVB_PLANE_A
;
7333 plane_bit
= MI_DISPLAY_FLIP_IVB_PLANE_B
;
7336 plane_bit
= MI_DISPLAY_FLIP_IVB_PLANE_C
;
7339 WARN_ONCE(1, "unknown plane in flip command\n");
7344 ret
= intel_ring_begin(ring
, 4);
7348 intel_ring_emit(ring
, MI_DISPLAY_FLIP_I915
| plane_bit
);
7349 intel_ring_emit(ring
, (fb
->pitches
[0] | obj
->tiling_mode
));
7350 intel_ring_emit(ring
, obj
->gtt_offset
+ intel_crtc
->dspaddr_offset
);
7351 intel_ring_emit(ring
, (MI_NOOP
));
7353 intel_mark_page_flip_active(intel_crtc
);
7354 intel_ring_advance(ring
);
7358 intel_unpin_fb_obj(obj
);
7363 static int intel_default_queue_flip(struct drm_device
*dev
,
7364 struct drm_crtc
*crtc
,
7365 struct drm_framebuffer
*fb
,
7366 struct drm_i915_gem_object
*obj
)
7371 static int intel_crtc_page_flip(struct drm_crtc
*crtc
,
7372 struct drm_framebuffer
*fb
,
7373 struct drm_pending_vblank_event
*event
)
7375 struct drm_device
*dev
= crtc
->dev
;
7376 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
7377 struct drm_framebuffer
*old_fb
= crtc
->fb
;
7378 struct drm_i915_gem_object
*obj
= to_intel_framebuffer(fb
)->obj
;
7379 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
7380 struct intel_unpin_work
*work
;
	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these registers.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->fb->offsets[0] ||
	     fb->pitches[0] != crtc->fb->pitches[0]))
		return -EINVAL;
7396 work
= kzalloc(sizeof *work
, GFP_KERNEL
);
7400 work
->event
= event
;
7402 work
->old_fb_obj
= to_intel_framebuffer(old_fb
)->obj
;
7403 INIT_WORK(&work
->work
, intel_unpin_work_fn
);
7405 ret
= drm_vblank_get(dev
, intel_crtc
->pipe
);
7409 /* We borrow the event spin lock for protecting unpin_work */
7410 lockmgr(&dev
->event_lock
, LK_EXCLUSIVE
);
7411 if (intel_crtc
->unpin_work
) {
7412 lockmgr(&dev
->event_lock
, LK_RELEASE
);
7413 drm_free(work
, M_DRM
);
7414 drm_vblank_put(dev
, intel_crtc
->pipe
);
7416 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
7419 intel_crtc
->unpin_work
= work
;
7420 lockmgr(&dev
->event_lock
, LK_RELEASE
);
7422 if (atomic_read(&intel_crtc
->unpin_work_count
) >= 2)
7423 flush_workqueue(dev_priv
->wq
);
7425 ret
= i915_mutex_lock_interruptible(dev
);
7429 /* Reference the objects for the scheduled work. */
7430 drm_gem_object_reference(&work
->old_fb_obj
->base
);
7431 drm_gem_object_reference(&obj
->base
);
7435 work
->pending_flip_obj
= obj
;
7437 work
->enable_stall_check
= true;
7439 atomic_inc(&intel_crtc
->unpin_work_count
);
7440 intel_crtc
->reset_counter
= atomic_read(&dev_priv
->gpu_error
.reset_counter
);
7442 ret
= dev_priv
->display
.queue_flip(dev
, crtc
, fb
, obj
);
7444 goto cleanup_pending
;
7446 intel_disable_fbc(dev
);
7447 intel_mark_fb_busy(obj
);
7448 mutex_unlock(&dev
->struct_mutex
);
7453 atomic_dec(&intel_crtc
->unpin_work_count
);
7455 drm_gem_object_unreference(&work
->old_fb_obj
->base
);
7456 drm_gem_object_unreference(&obj
->base
);
7457 mutex_unlock(&dev
->struct_mutex
);
7460 lockmgr(&dev
->event_lock
, LK_EXCLUSIVE
);
7461 intel_crtc
->unpin_work
= NULL
;
7462 lockmgr(&dev
->event_lock
, LK_RELEASE
);
7464 drm_vblank_put(dev
, intel_crtc
->pipe
);
7466 drm_free(work
, M_DRM
);
7471 static struct drm_crtc_helper_funcs intel_helper_funcs
= {
7472 .mode_set_base_atomic
= intel_pipe_set_base_atomic
,
7473 .load_lut
= intel_crtc_load_lut
,
7474 .disable
= intel_crtc_noop
,
7477 bool intel_encoder_check_is_cloned(struct intel_encoder
*encoder
)
7479 struct intel_encoder
*other_encoder
;
7480 struct drm_crtc
*crtc
= &encoder
->new_crtc
->base
;
7485 list_for_each_entry(other_encoder
,
7486 &crtc
->dev
->mode_config
.encoder_list
,
7489 if (&other_encoder
->new_crtc
->base
!= crtc
||
7490 encoder
== other_encoder
)
7499 static bool intel_encoder_crtc_ok(struct drm_encoder
*encoder
,
7500 struct drm_crtc
*crtc
)
7502 struct drm_device
*dev
;
7503 struct drm_crtc
*tmp
;
7506 WARN(!crtc
, "checking null crtc?\n");
	/* profmakx: this is to prevent the kernel from panicking */
7514 list_for_each_entry(tmp
, &dev
->mode_config
.crtc_list
, head
) {
7520 if (encoder
->possible_crtcs
& crtc_mask
)
7526 * intel_modeset_update_staged_output_state
7528 * Updates the staged output configuration state, e.g. after we've read out the
7531 static void intel_modeset_update_staged_output_state(struct drm_device
*dev
)
7533 struct intel_encoder
*encoder
;
7534 struct intel_connector
*connector
;
7536 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
,
7538 connector
->new_encoder
=
7539 to_intel_encoder(connector
->base
.encoder
);
7542 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
7545 to_intel_crtc(encoder
->base
.crtc
);
7550 * intel_modeset_commit_output_state
7552 * This function copies the stage display pipe configuration to the real one.
7554 static void intel_modeset_commit_output_state(struct drm_device
*dev
)
7556 struct intel_encoder
*encoder
;
7557 struct intel_connector
*connector
;
7559 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
,
7561 connector
->base
.encoder
= &connector
->new_encoder
->base
;
7564 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
7566 encoder
->base
.crtc
= &encoder
->new_crtc
->base
;
7571 pipe_config_set_bpp(struct drm_crtc
*crtc
,
7572 struct drm_framebuffer
*fb
,
7573 struct intel_crtc_config
*pipe_config
)
7575 struct drm_device
*dev
= crtc
->dev
;
7576 struct drm_connector
*connector
;
7579 switch (fb
->pixel_format
) {
7581 bpp
= 8*3; /* since we go through a colormap */
7583 case DRM_FORMAT_XRGB1555
:
7584 case DRM_FORMAT_ARGB1555
:
7585 /* checked in intel_framebuffer_init already */
7586 if (WARN_ON(INTEL_INFO(dev
)->gen
> 3))
7588 case DRM_FORMAT_RGB565
:
7589 bpp
= 6*3; /* min is 18bpp */
7591 case DRM_FORMAT_XBGR8888
:
7592 case DRM_FORMAT_ABGR8888
:
7593 /* checked in intel_framebuffer_init already */
7594 if (WARN_ON(INTEL_INFO(dev
)->gen
< 4))
7596 case DRM_FORMAT_XRGB8888
:
7597 case DRM_FORMAT_ARGB8888
:
7600 case DRM_FORMAT_XRGB2101010
:
7601 case DRM_FORMAT_ARGB2101010
:
7602 case DRM_FORMAT_XBGR2101010
:
7603 case DRM_FORMAT_ABGR2101010
:
7604 /* checked in intel_framebuffer_init already */
7605 if (WARN_ON(INTEL_INFO(dev
)->gen
< 4))
7609 /* TODO: gen4+ supports 16 bpc floating point, too. */
7611 DRM_DEBUG_KMS("unsupported depth\n");
7615 pipe_config
->pipe_bpp
= bpp
;
7617 /* Clamp display bpp to EDID value */
7618 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
,
7620 if (connector
->encoder
&& connector
->encoder
->crtc
!= crtc
)
7623 /* Don't use an invalid EDID bpc value */
7624 if (connector
->display_info
.bpc
&&
7625 connector
->display_info
.bpc
* 3 < bpp
) {
7626 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
7627 bpp
, connector
->display_info
.bpc
*3);
7628 pipe_config
->pipe_bpp
= connector
->display_info
.bpc
*3;
7635 static struct intel_crtc_config
*
7636 intel_modeset_pipe_config(struct drm_crtc
*crtc
,
7637 struct drm_framebuffer
*fb
,
7638 struct drm_display_mode
*mode
)
7640 struct drm_device
*dev
= crtc
->dev
;
7641 struct drm_encoder_helper_funcs
*encoder_funcs
;
7642 struct intel_encoder
*encoder
;
7643 struct intel_crtc_config
*pipe_config
;
7646 pipe_config
= kzalloc(sizeof(*pipe_config
), GFP_KERNEL
);
7648 return ERR_PTR(-ENOMEM
);
7650 drm_mode_copy(&pipe_config
->adjusted_mode
, mode
);
7651 drm_mode_copy(&pipe_config
->requested_mode
, mode
);
7653 plane_bpp
= pipe_config_set_bpp(crtc
, fb
, pipe_config
);
7657 /* Pass our mode to the connectors and the CRTC to give them a chance to
7658 * adjust it according to limitations or connector properties, and also
7659 * a chance to reject the mode entirely.
7661 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
7664 if (&encoder
->new_crtc
->base
!= crtc
)
7667 if (encoder
->compute_config
) {
7668 if (!(encoder
->compute_config(encoder
, pipe_config
))) {
7669 DRM_DEBUG_KMS("Encoder config failure\n");
7676 encoder_funcs
= encoder
->base
.helper_private
;
7677 if (!(encoder_funcs
->mode_fixup(&encoder
->base
,
7678 &pipe_config
->requested_mode
,
7679 &pipe_config
->adjusted_mode
))) {
7680 DRM_DEBUG_KMS("Encoder fixup failed\n");
7685 if (!(intel_crtc_compute_config(crtc
, pipe_config
))) {
7686 DRM_DEBUG_KMS("CRTC fixup failed\n");
7689 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc
->base
.id
);
7691 pipe_config
->dither
= pipe_config
->pipe_bpp
!= plane_bpp
;
7692 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
7693 plane_bpp
, pipe_config
->pipe_bpp
, pipe_config
->dither
);
7698 return ERR_PTR(-EINVAL
);
7701 /* Computes which crtcs are affected and sets the relevant bits in the mask. For
7702 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
7704 intel_modeset_affected_pipes(struct drm_crtc
*crtc
, unsigned *modeset_pipes
,
7705 unsigned *prepare_pipes
, unsigned *disable_pipes
)
7707 struct intel_crtc
*intel_crtc
;
7708 struct drm_device
*dev
= crtc
->dev
;
7709 struct intel_encoder
*encoder
;
7710 struct intel_connector
*connector
;
7711 struct drm_crtc
*tmp_crtc
;
7713 *disable_pipes
= *modeset_pipes
= *prepare_pipes
= 0;
7715 /* Check which crtcs have changed outputs connected to them, these need
7716 * to be part of the prepare_pipes mask. We don't (yet) support global
7717 * modeset across multiple crtcs, so modeset_pipes will only have one
7718 * bit set at most. */
7719 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
,
7721 if (connector
->base
.encoder
== &connector
->new_encoder
->base
)
7724 if (connector
->base
.encoder
) {
7725 tmp_crtc
= connector
->base
.encoder
->crtc
;
7727 *prepare_pipes
|= 1 << to_intel_crtc(tmp_crtc
)->pipe
;
7730 if (connector
->new_encoder
)
7732 1 << connector
->new_encoder
->new_crtc
->pipe
;
7735 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
7737 if (encoder
->base
.crtc
== &encoder
->new_crtc
->base
)
7740 if (encoder
->base
.crtc
) {
7741 tmp_crtc
= encoder
->base
.crtc
;
7743 *prepare_pipes
|= 1 << to_intel_crtc(tmp_crtc
)->pipe
;
7746 if (encoder
->new_crtc
)
7747 *prepare_pipes
|= 1 << encoder
->new_crtc
->pipe
;
7750 /* Check for any pipes that will be fully disabled ... */
7751 list_for_each_entry(intel_crtc
, &dev
->mode_config
.crtc_list
,
7755 /* Don't try to disable disabled crtcs. */
7756 if (!intel_crtc
->base
.enabled
)
7759 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
7761 if (encoder
->new_crtc
== intel_crtc
)
7766 *disable_pipes
|= 1 << intel_crtc
->pipe
;
	/* set_mode is also used to update properties on live display pipes. */
7771 intel_crtc
= to_intel_crtc(crtc
);
7773 *prepare_pipes
|= 1 << intel_crtc
->pipe
;
7776 * For simplicity do a full modeset on any pipe where the output routing
7777 * changed. We could be more clever, but that would require us to be
7778 * more careful with calling the relevant encoder->mode_set functions.
7781 *modeset_pipes
= *prepare_pipes
;
7783 /* ... and mask these out. */
7784 *modeset_pipes
&= ~(*disable_pipes
);
7785 *prepare_pipes
&= ~(*disable_pipes
);
	/*
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
	 * obeys this rule, but the modeset restore mode of
	 * intel_modeset_setup_hw_state does not.
	 */
7792 *modeset_pipes
&= 1 << intel_crtc
->pipe
;
7793 *prepare_pipes
&= 1 << intel_crtc
->pipe
;
7796 static bool intel_crtc_in_use(struct drm_crtc
*crtc
)
7798 struct drm_encoder
*encoder
;
7799 struct drm_device
*dev
= crtc
->dev
;
7801 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, head
)
7802 if (encoder
->crtc
== crtc
)
7809 intel_modeset_update_state(struct drm_device
*dev
, unsigned prepare_pipes
)
7811 struct intel_encoder
*intel_encoder
;
7812 struct intel_crtc
*intel_crtc
;
7813 struct drm_connector
*connector
;
7815 list_for_each_entry(intel_encoder
, &dev
->mode_config
.encoder_list
,
7817 if (!intel_encoder
->base
.crtc
)
7820 intel_crtc
= to_intel_crtc(intel_encoder
->base
.crtc
);
7822 if (prepare_pipes
& (1 << intel_crtc
->pipe
))
7823 intel_encoder
->connectors_active
= false;
7826 intel_modeset_commit_output_state(dev
);
7828 /* Update computed state. */
7829 list_for_each_entry(intel_crtc
, &dev
->mode_config
.crtc_list
,
7831 intel_crtc
->base
.enabled
= intel_crtc_in_use(&intel_crtc
->base
);
7834 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
7835 if (!connector
->encoder
|| !connector
->encoder
->crtc
)
7838 intel_crtc
= to_intel_crtc(connector
->encoder
->crtc
);
7840 if (prepare_pipes
& (1 << intel_crtc
->pipe
)) {
7841 struct drm_property
*dpms_property
=
7842 dev
->mode_config
.dpms_property
;
7844 connector
->dpms
= DRM_MODE_DPMS_ON
;
7845 drm_object_property_set_value(&connector
->base
,
7849 intel_encoder
= to_intel_encoder(connector
->encoder
);
7850 intel_encoder
->connectors_active
= true;
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))
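/* Iterate over all intel crtcs whose pipe bit is set in @mask. */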
7863 intel_pipe_config_compare(struct intel_crtc_config
*current_config
,
7864 struct intel_crtc_config
*pipe_config
)
7866 if (current_config
->has_pch_encoder
!= pipe_config
->has_pch_encoder
) {
7867 DRM_ERROR("mismatch in has_pch_encoder "
7868 "(expected %i, found %i)\n",
7869 current_config
->has_pch_encoder
,
7870 pipe_config
->has_pch_encoder
);
7878 intel_modeset_check_state(struct drm_device
*dev
)
7880 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
7881 struct intel_crtc
*crtc
;
7882 struct intel_encoder
*encoder
;
7883 struct intel_connector
*connector
;
7884 struct intel_crtc_config pipe_config
;
7886 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
,
7888 /* This also checks the encoder/connector hw state with the
7889 * ->get_hw_state callbacks. */
7890 intel_connector_check_state(connector
);
7892 WARN(&connector
->new_encoder
->base
!= connector
->base
.encoder
,
7893 "connector's staged encoder doesn't match current encoder\n");
7896 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
7898 bool enabled
= false;
7899 bool active
= false;
7900 enum i915_pipe pipe
, tracked_pipe
;
7902 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
7903 encoder
->base
.base
.id
,
7904 drm_get_encoder_name(&encoder
->base
));
		WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's staged crtc doesn't match current crtc\n");
7908 WARN(encoder
->connectors_active
&& !encoder
->base
.crtc
,
7909 "encoder's active_connectors set, but no crtc\n");
7911 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
,
7913 if (connector
->base
.encoder
!= &encoder
->base
)
7916 if (connector
->base
.dpms
!= DRM_MODE_DPMS_OFF
)
7919 WARN(!!encoder
->base
.crtc
!= enabled
,
7920 "encoder's enabled state mismatch "
7921 "(expected %i, found %i)\n",
7922 !!encoder
->base
.crtc
, enabled
);
7923 WARN(active
&& !encoder
->base
.crtc
,
7924 "active encoder with no crtc\n");
7926 WARN(encoder
->connectors_active
!= active
,
7927 "encoder's computed active state doesn't match tracked active state "
7928 "(expected %i, found %i)\n", active
, encoder
->connectors_active
);
7930 active
= encoder
->get_hw_state(encoder
, &pipe
);
7931 WARN(active
!= encoder
->connectors_active
,
7932 "encoder's hw state doesn't match sw tracking "
7933 "(expected %i, found %i)\n",
7934 encoder
->connectors_active
, active
);
7936 if (!encoder
->base
.crtc
)
7939 tracked_pipe
= to_intel_crtc(encoder
->base
.crtc
)->pipe
;
7940 WARN(active
&& pipe
!= tracked_pipe
,
7941 "active encoder's pipe doesn't match"
7942 "(expected %i, found %i)\n",
7943 tracked_pipe
, pipe
);
7947 list_for_each_entry(crtc
, &dev
->mode_config
.crtc_list
,
7949 bool enabled
= false;
7950 bool active
= false;
7952 DRM_DEBUG_KMS("[CRTC:%d]\n",
7953 crtc
->base
.base
.id
);
7955 WARN(crtc
->active
&& !crtc
->base
.enabled
,
7956 "active crtc, but not enabled in sw tracking\n");
7958 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
7960 if (encoder
->base
.crtc
!= &crtc
->base
)
7963 if (encoder
->connectors_active
)
7966 WARN(active
!= crtc
->active
,
7967 "crtc's computed active state doesn't match tracked active state "
7968 "(expected %i, found %i)\n", active
, crtc
->active
);
7969 WARN(enabled
!= crtc
->base
.enabled
,
7970 "crtc's computed enabled state doesn't match tracked enabled state "
7971 "(expected %i, found %i)\n", enabled
, crtc
->base
.enabled
);
7973 memset(&pipe_config
, 0, sizeof(pipe_config
));
7974 active
= dev_priv
->display
.get_pipe_config(crtc
,
7977 /* hw state is inconsistent with the pipe A quirk */
7978 if (crtc
->pipe
== PIPE_A
&& dev_priv
->quirks
& QUIRK_PIPEA_FORCE
)
7979 active
= crtc
->active
;
7981 WARN(crtc
->active
!= active
,
7982 "crtc active state doesn't match with hw state "
7983 "(expected %i, found %i)\n", crtc
->active
, active
);
7986 !intel_pipe_config_compare(&crtc
->config
, &pipe_config
),
7987 "pipe state doesn't match!\n");
7991 static int __intel_set_mode(struct drm_crtc
*crtc
,
7992 struct drm_display_mode
*mode
,
7993 int x
, int y
, struct drm_framebuffer
*fb
)
7995 struct drm_device
*dev
= crtc
->dev
;
7996 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
7997 struct drm_display_mode
*saved_mode
, *saved_hwmode
;
7998 struct intel_crtc_config
*pipe_config
= NULL
;
7999 struct intel_crtc
*intel_crtc
;
8000 unsigned disable_pipes
, prepare_pipes
, modeset_pipes
;
8003 saved_mode
= kmalloc(2 * sizeof(*saved_mode
), M_DRM
, M_WAITOK
);
8006 saved_hwmode
= saved_mode
+ 1;
8008 intel_modeset_affected_pipes(crtc
, &modeset_pipes
,
8009 &prepare_pipes
, &disable_pipes
);
8011 *saved_hwmode
= crtc
->hwmode
;
8012 *saved_mode
= crtc
->mode
;
	/* Hack: Because we don't (yet) support global modeset on multiple
	 * crtcs, we don't keep track of the new mode for more than one crtc.
	 * Hence simply check whether any bit is set in modeset_pipes in all the
	 * pieces of code that are not yet converted to deal with multiple crtcs
	 * changing their mode at the same time. */
8019 if (modeset_pipes
) {
8020 pipe_config
= intel_modeset_pipe_config(crtc
, fb
, mode
);
8021 if (IS_ERR(pipe_config
)) {
8022 ret
= PTR_ERR(pipe_config
);
8029 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
8030 modeset_pipes
, prepare_pipes
, disable_pipes
);
8032 for_each_intel_crtc_masked(dev
, disable_pipes
, intel_crtc
)
8033 intel_crtc_disable(&intel_crtc
->base
);
8035 for_each_intel_crtc_masked(dev
, prepare_pipes
, intel_crtc
) {
8036 if (intel_crtc
->base
.enabled
)
8037 dev_priv
->display
.crtc_disable(&intel_crtc
->base
);
8040 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
8041 * to set it here already despite that we pass it down the callchain.
8043 if (modeset_pipes
) {
8044 enum transcoder tmp
= to_intel_crtc(crtc
)->config
.cpu_transcoder
;
8046 /* mode_set/enable/disable functions rely on a correct pipe
8048 to_intel_crtc(crtc
)->config
= *pipe_config
;
8049 to_intel_crtc(crtc
)->config
.cpu_transcoder
= tmp
;
	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
8054 intel_modeset_update_state(dev
, prepare_pipes
);
8056 if (dev_priv
->display
.modeset_global_resources
)
8057 dev_priv
->display
.modeset_global_resources(dev
);
8059 /* Set up the DPLL and any encoders state that needs to adjust or depend
8062 for_each_intel_crtc_masked(dev
, modeset_pipes
, intel_crtc
) {
8063 ret
= intel_crtc_mode_set(&intel_crtc
->base
,
8069 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
8070 for_each_intel_crtc_masked(dev
, prepare_pipes
, intel_crtc
)
8071 dev_priv
->display
.crtc_enable(&intel_crtc
->base
);
8073 if (modeset_pipes
) {
8074 /* Store real post-adjustment hardware mode. */
8075 crtc
->hwmode
= pipe_config
->adjusted_mode
;
8077 /* Calculate and store various constants which
8078 * are later needed by vblank and swap-completion
8079 * timestamping. They are derived from true hwmode.
8081 drm_calc_timestamping_constants(crtc
);
8084 /* FIXME: add subpixel order */
8086 if (ret
&& crtc
->enabled
) {
8087 crtc
->hwmode
= *saved_hwmode
;
8088 crtc
->mode
= *saved_mode
;
8097 int intel_set_mode(struct drm_crtc
*crtc
,
8098 struct drm_display_mode
*mode
,
8099 int x
, int y
, struct drm_framebuffer
*fb
)
8103 ret
= __intel_set_mode(crtc
, mode
, x
, y
, fb
);
8106 intel_modeset_check_state(crtc
->dev
);
8111 void intel_crtc_restore_mode(struct drm_crtc
*crtc
)
8113 intel_set_mode(crtc
, &crtc
->mode
, crtc
->x
, crtc
->y
, crtc
->fb
);
8116 #undef for_each_intel_crtc_masked
8118 static void intel_set_config_free(struct intel_set_config
*config
)
8123 drm_free(config
->save_connector_encoders
, M_DRM
);
8124 drm_free(config
->save_encoder_crtcs
, M_DRM
);
8125 drm_free(config
, M_DRM
);
8128 static int intel_set_config_save_state(struct drm_device
*dev
,
8129 struct intel_set_config
*config
)
8131 struct drm_encoder
*encoder
;
8132 struct drm_connector
*connector
;
8135 config
->save_encoder_crtcs
=
8136 kmalloc(dev
->mode_config
.num_encoder
*
8137 sizeof(struct drm_crtc
*), M_DRM
, M_WAITOK
| M_ZERO
);
8138 if (!config
->save_encoder_crtcs
)
8141 config
->save_connector_encoders
=
8142 kmalloc(dev
->mode_config
.num_connector
*
8143 sizeof(struct drm_encoder
*), M_DRM
, M_WAITOK
| M_ZERO
);
8144 if (!config
->save_connector_encoders
)
8147 /* Copy data. Note that driver private data is not affected.
8148 * Should anything bad happen only the expected state is
8149 * restored, not the drivers personal bookkeeping.
8152 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, head
) {
8153 config
->save_encoder_crtcs
[count
++] = encoder
->crtc
;
8157 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, head
) {
8158 config
->save_connector_encoders
[count
++] = connector
->encoder
;
8164 static void intel_set_config_restore_state(struct drm_device
*dev
,
8165 struct intel_set_config
*config
)
8167 struct intel_encoder
*encoder
;
8168 struct intel_connector
*connector
;
8172 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, base
.head
) {
8174 to_intel_crtc(config
->save_encoder_crtcs
[count
++]);
8178 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
, base
.head
) {
8179 connector
->new_encoder
=
8180 to_intel_encoder(config
->save_connector_encoders
[count
++]);
8185 is_crtc_connector_off(struct drm_crtc
*crtc
, struct drm_connector
*connectors
,
8190 for (i
= 0; i
< num_connectors
; i
++)
8191 if (connectors
[i
].encoder
&&
8192 connectors
[i
].encoder
->crtc
== crtc
&&
8193 connectors
[i
].dpms
!= DRM_MODE_DPMS_ON
)
8200 intel_set_config_compute_mode_changes(struct drm_mode_set
*set
,
8201 struct intel_set_config
*config
)
8204 /* We should be able to check here if the fb has the same properties
8205 * and then just flip_or_move it */
8206 if (set
->connectors
!= NULL
&&
8207 is_crtc_connector_off(set
->crtc
, *set
->connectors
,
8208 set
->num_connectors
)) {
8209 config
->mode_changed
= true;
8210 } else if (set
->crtc
->fb
!= set
->fb
) {
8211 /* If we have no fb then treat it as a full mode set */
8212 if (set
->crtc
->fb
== NULL
) {
8213 DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
8214 config
->mode_changed
= true;
8215 } else if (set
->fb
== NULL
) {
8216 config
->mode_changed
= true;
8217 } else if (set
->fb
->pixel_format
!=
8218 set
->crtc
->fb
->pixel_format
) {
8219 config
->mode_changed
= true;
8221 config
->fb_changed
= true;
8225 if (set
->fb
&& (set
->x
!= set
->crtc
->x
|| set
->y
!= set
->crtc
->y
))
8226 config
->fb_changed
= true;
8228 if (set
->mode
&& !drm_mode_equal(set
->mode
, &set
->crtc
->mode
)) {
8229 DRM_DEBUG_KMS("modes are different, full mode set\n");
8230 drm_mode_debug_printmodeline(&set
->crtc
->mode
);
8231 drm_mode_debug_printmodeline(set
->mode
);
8232 config
->mode_changed
= true;
8237 intel_modeset_stage_output_state(struct drm_device
*dev
,
8238 struct drm_mode_set
*set
,
8239 struct intel_set_config
*config
)
8241 struct drm_crtc
*new_crtc
;
8242 struct intel_connector
*connector
;
8243 struct intel_encoder
*encoder
;
8246 /* The upper layers ensure that we either disable a crtc or have a list
8247 * of connectors. For paranoia, double-check this. */
8248 WARN_ON(!set
->fb
&& (set
->num_connectors
!= 0));
8249 WARN_ON(set
->fb
&& (set
->num_connectors
== 0));
8252 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
,
8254 /* Otherwise traverse passed in connector list and get encoders
8256 for (ro
= 0; ro
< set
->num_connectors
; ro
++) {
8257 if (set
->connectors
[ro
] == &connector
->base
) {
8258 connector
->new_encoder
= connector
->encoder
;
8263 /* If we disable the crtc, disable all its connectors. Also, if
8264 * the connector is on the changing crtc but not on the new
8265 * connector list, disable it. */
8266 if ((!set
->fb
|| ro
== set
->num_connectors
) &&
8267 connector
->base
.encoder
&&
8268 connector
->base
.encoder
->crtc
== set
->crtc
) {
8269 connector
->new_encoder
= NULL
;
8271 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
8272 connector
->base
.base
.id
,
8273 drm_get_connector_name(&connector
->base
));
8277 if (&connector
->new_encoder
->base
!= connector
->base
.encoder
) {
8278 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
8279 config
->mode_changed
= true;
8282 /* connector->new_encoder is now updated for all connectors. */
8284 /* Update crtc of enabled connectors. */
8286 list_for_each_entry(connector
, &dev
->mode_config
.connector_list
,
8288 if (!connector
->new_encoder
)
8291 new_crtc
= connector
->new_encoder
->base
.crtc
;
8293 for (ro
= 0; ro
< set
->num_connectors
; ro
++) {
8294 if (set
->connectors
[ro
] == &connector
->base
)
8295 new_crtc
= set
->crtc
;
8298 /* Make sure the new CRTC will work with the encoder */
8299 if (!intel_encoder_crtc_ok(&connector
->new_encoder
->base
,
8303 connector
->encoder
->new_crtc
= to_intel_crtc(new_crtc
);
8305 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
8306 connector
->base
.base
.id
,
8307 drm_get_connector_name(&connector
->base
),
8311 /* Check for any encoders that needs to be disabled. */
8312 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
,
8314 list_for_each_entry(connector
,
8315 &dev
->mode_config
.connector_list
,
8317 if (connector
->new_encoder
== encoder
) {
8318 WARN_ON(!connector
->new_encoder
->new_crtc
);
8323 encoder
->new_crtc
= NULL
;
8325 /* Only now check for crtc changes so we don't miss encoders
8326 * that will be disabled. */
8327 if (&encoder
->new_crtc
->base
!= encoder
->base
.crtc
) {
8328 DRM_DEBUG_KMS("crtc changed, full mode switch\n");
8329 config
->mode_changed
= true;
8332 /* Now we've also updated encoder->new_crtc for all encoders. */
8337 static int intel_crtc_set_config(struct drm_mode_set
*set
)
8339 struct drm_device
*dev
;
8340 struct drm_mode_set save_set
;
8341 struct intel_set_config
*config
;
8346 BUG_ON(!set
->crtc
->helper_private
);
8348 /* Enforce sane interface api - has been abused by the fb helper. */
8349 BUG_ON(!set
->mode
&& set
->fb
);
8350 BUG_ON(set
->fb
&& set
->num_connectors
== 0);
8353 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
8354 set
->crtc
->base
.id
, set
->fb
->base
.id
,
8355 (int)set
->num_connectors
, set
->x
, set
->y
);
8357 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set
->crtc
->base
.id
);
8360 dev
= set
->crtc
->dev
;
8363 config
= kzalloc(sizeof(*config
), GFP_KERNEL
);
8367 ret
= intel_set_config_save_state(dev
, config
);
8371 save_set
.crtc
= set
->crtc
;
8372 save_set
.mode
= &set
->crtc
->mode
;
8373 save_set
.x
= set
->crtc
->x
;
8374 save_set
.y
= set
->crtc
->y
;
8375 save_set
.fb
= set
->crtc
->fb
;
8377 /* Compute whether we need a full modeset, only an fb base update or no
8378 * change at all. In the future we might also check whether only the
8379 * mode changed, e.g. for LVDS where we only change the panel fitter in
8381 intel_set_config_compute_mode_changes(set
, config
);
8383 ret
= intel_modeset_stage_output_state(dev
, set
, config
);
8387 if (config
->mode_changed
) {
8389 DRM_DEBUG_KMS("attempting to set mode from"
8391 drm_mode_debug_printmodeline(set
->mode
);
8394 ret
= intel_set_mode(set
->crtc
, set
->mode
,
8395 set
->x
, set
->y
, set
->fb
);
8396 } else if (config
->fb_changed
) {
8397 intel_crtc_wait_for_pending_flips(set
->crtc
);
8399 ret
= intel_pipe_set_base(set
->crtc
,
8400 set
->x
, set
->y
, set
->fb
);
8404 DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
8405 set
->crtc
->base
.id
, ret
);
8407 intel_set_config_restore_state(dev
, config
);
8409 /* Try to restore the config */
8410 if (config
->mode_changed
&&
8411 intel_set_mode(save_set
.crtc
, save_set
.mode
,
8412 save_set
.x
, save_set
.y
, save_set
.fb
))
8413 DRM_ERROR("failed to restore config after modeset failure\n");
8417 intel_set_config_free(config
);
8421 static const struct drm_crtc_funcs intel_crtc_funcs
= {
8422 .cursor_set
= intel_crtc_cursor_set
,
8423 .cursor_move
= intel_crtc_cursor_move
,
8424 .gamma_set
= intel_crtc_gamma_set
,
8425 .set_config
= intel_crtc_set_config
,
8426 .destroy
= intel_crtc_destroy
,
8427 .page_flip
= intel_crtc_page_flip
,
8430 static void intel_cpu_pll_init(struct drm_device
*dev
)
8433 intel_ddi_pll_init(dev
);
8436 static void intel_pch_pll_init(struct drm_device
*dev
)
8438 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
8441 if (dev_priv
->num_pch_pll
== 0) {
8442 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
8446 for (i
= 0; i
< dev_priv
->num_pch_pll
; i
++) {
8447 dev_priv
->pch_plls
[i
].pll_reg
= _PCH_DPLL(i
);
8448 dev_priv
->pch_plls
[i
].fp0_reg
= _PCH_FP0(i
);
8449 dev_priv
->pch_plls
[i
].fp1_reg
= _PCH_FP1(i
);
8453 static void intel_crtc_init(struct drm_device
*dev
, int pipe
)
8455 drm_i915_private_t
*dev_priv
= dev
->dev_private
;
8456 struct intel_crtc
*intel_crtc
;
8459 intel_crtc
= kzalloc(sizeof(struct intel_crtc
) + (INTELFB_CONN_LIMIT
* sizeof(struct drm_connector
*)), GFP_KERNEL
);
8460 if (intel_crtc
== NULL
)
8463 drm_crtc_init(dev
, &intel_crtc
->base
, &intel_crtc_funcs
);
8465 drm_mode_crtc_set_gamma_size(&intel_crtc
->base
, 256);
8466 for (i
= 0; i
< 256; i
++) {
8467 intel_crtc
->lut_r
[i
] = i
;
8468 intel_crtc
->lut_g
[i
] = i
;
8469 intel_crtc
->lut_b
[i
] = i
;
8472 /* Swap pipes & planes for FBC on pre-965 */
8473 intel_crtc
->pipe
= pipe
;
8474 intel_crtc
->plane
= pipe
;
8475 intel_crtc
->config
.cpu_transcoder
= pipe
;
8476 if (IS_MOBILE(dev
) && IS_GEN3(dev
)) {
8477 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
8478 intel_crtc
->plane
= !pipe
;
8481 BUG_ON(pipe
>= ARRAY_SIZE(dev_priv
->plane_to_crtc_mapping
) ||
8482 dev_priv
->plane_to_crtc_mapping
[intel_crtc
->plane
] != NULL
);
8483 dev_priv
->plane_to_crtc_mapping
[intel_crtc
->plane
] = &intel_crtc
->base
;
8484 dev_priv
->pipe_to_crtc_mapping
[intel_crtc
->pipe
] = &intel_crtc
->base
;
8486 drm_crtc_helper_add(&intel_crtc
->base
, &intel_helper_funcs
);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_mode_object *drmmode_obj;
	struct intel_crtc *crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
					   DRM_MODE_OBJECT_CRTC);

	if (!drmmode_obj) {
		DRM_ERROR("no such CRTC id\n");
		return -EINVAL;
	}

	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}
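/* Compute the possible_clones bitmask for an encoder: an encoder can always
 * "clone" itself, and two cloneable encoders can share the single MUX. */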
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	list_for_each_entry(source_encoder,
			    &dev->mode_config.encoder_list, base.head) {

		if (encoder == source_encoder)
			index_mask |= (1 << entry);

		/* Intel hw has only one MUX where encoders could be cloned. */
		if (encoder->cloneable && source_encoder->cloneable)
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) &&
	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}
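/* Probe the display outputs present on this platform (DDI, PCH-attached
 * ports, VLV, SDVO/HDMI/DP, LVDS, TV, DVO) and register an encoder for each
 * one that is detected. */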
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds;

	has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;

		dpd_is_edp = intel_dpd_is_edp(dev);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
			if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
		}
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B, PORT_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C, PORT_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D, PORT_D);
		}
	} else if (IS_GEN2(dev)) {
		intel_dvo_init(dev);
	}

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
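/* Framebuffer destruction drops the reference on the backing GEM object
 * before freeing the wrapper structure. */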
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);

	drm_free(intel_fb, M_DRM);
}
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	return drm_gem_handle_create(file, &obj->base, handle);
}
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
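/* Validate a userspace framebuffer request (tiling mode, pitch alignment and
 * limits, per-generation pixel formats, offsets) before wrapping the GEM
 * object in a drm_framebuffer. */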
int intel_framebuffer_init(struct drm_device *dev,
			   struct intel_framebuffer *intel_fb,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	int ret;

	if (obj->tiling_mode == I915_TILING_Y) {
		DRM_DEBUG("hardware does not support tiling Y\n");
		return -EINVAL;
	}

	if (mode_cmd->pitches[0] & 63) {
		DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
			  mode_cmd->pitches[0]);
		return -EINVAL;
	}

	/* FIXME <= Gen4 stride limits are bit unclear */
	if (mode_cmd->pitches[0] > 32768) {
		DRM_DEBUG("pitch (%d) must be less than 32768\n",
			  mode_cmd->pitches[0]);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, mode_cmd, obj);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.off = haswell_crtc_off;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_plane = i9xx_update_plane;
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	if (HAS_PCH_SPLIT(dev)) {
		if (IS_GEN5(dev)) {
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			dev_priv->display.write_eld = ironlake_write_eld;
			dev_priv->display.modeset_global_resources =
				ivb_modeset_global_resources;
		} else if (IS_HASWELL(dev)) {
			dev_priv->display.fdi_link_train = hsw_fdi_link_train;
			dev_priv->display.write_eld = haswell_write_eld;
			dev_priv->display.modeset_global_resources =
				haswell_modeset_global_resources;
		}
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}
/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}
/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
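/* Quirk table entries are { PCI device id, subsystem vendor, subsystem
 * device, hook }; PCI_ANY_ID wildcards the subsystem fields. */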
static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830/845 need to leave pipe A & dpll A up */
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
};
static void intel_init_quirks(struct drm_device *dev)
{
	device_t d = dev->dev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (pci_get_device(d) == q->device &&
		    (pci_get_subvendor(d) == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (pci_get_subdevice(d) == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(VGA_SR_INDEX, 1);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_init_power_well(dev);

	intel_prepare_ddi(dev);

	intel_init_clock_gating(dev);

	mutex_lock(&dev->struct_mutex);
	intel_enable_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
}
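/* One-time modeset initialisation: set the mode_config limits, apply quirks,
 * install the per-chip display functions, create CRTCs and planes and probe
 * the outputs. */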
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, j, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	intel_init_display(dev);

	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
		intel_crtc_init(dev, i);
		for (j = 0; j < dev_priv->num_plane; j++) {
			ret = intel_plane_init(dev, i, j);
			if (ret)
				DRM_DEBUG_KMS("pipe %d plane %d init failed: %d\n",
					      i, j, ret);
		}
	}

	intel_cpu_pll_init(dev);
	intel_pch_pll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_disable_fbc(dev);
}
static void
intel_connector_break_all_links(struct intel_connector *connector)
{
	connector->base.dpms = DRM_MODE_DPMS_OFF;
	connector->base.encoder = NULL;
	connector->encoder->connectors_active = false;
	connector->encoder->base.crtc = NULL;
}
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	list_for_each_entry(connector,
			    &dev->mode_config.connector_list,
			    base.head) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
		intel_release_load_detect_pipe(crt, &load_detect_temp);
}
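/* Returns false if the other plane is scanning out from this CRTC's pipe,
 * i.e. the BIOS left the plane->pipe mapping crossed on gen2/3. */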
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	reg = DSPCNTR(!crtc->plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}
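/* Bring a CRTC read out from the BIOS into a state the driver can handle:
 * fix a crossed plane->pipe mapping on gen2/3, apply the pipe A quirk and
 * reconcile crtc->active with the DPMS/encoder state. */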
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config.cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		struct intel_connector *connector;
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * on the active plane. */
		plane = crtc->plane;
		crtc->plane = !plane;
		dev_priv->display.crtc_disable(&crtc->base);
		crtc->plane = plane;

		/* ... and break all links. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder->base.crtc != &crtc->base)
				continue;

			intel_connector_break_all_links(connector);
		}

		WARN_ON(crtc->active);
		crtc->base.enabled = false;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below will restore the pipe to the right state, but
		 * leave the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	intel_crtc_update_dpms(&crtc->base);

	if (crtc->active != crtc->base.enabled) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.enabled ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only established when things are
		 * actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
			WARN_ON(encoder->connectors_active);
			encoder->base.crtc = NULL;
		}
	}
}
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (encoder->connectors_active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base));

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      drm_get_encoder_name(&encoder->base));
			encoder->disable(encoder);
		}

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		list_for_each_entry(connector,
				    &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder != encoder)
				continue;

			intel_connector_break_all_links(connector);
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
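/* Re-disable the legacy VGA plane if something (typically the BIOS) has
 * turned it back on behind the driver's back. */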
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
 * and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
				  bool force_restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	u32 tmp;
	struct drm_plane *plane;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	if (IS_HASWELL(dev)) {
		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));

		if (tmp & TRANS_DDI_FUNC_ENABLE) {
			switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
			case TRANS_DDI_EDP_INPUT_A_ON:
			case TRANS_DDI_EDP_INPUT_A_ONOFF:
				pipe = PIPE_A;
				break;
			case TRANS_DDI_EDP_INPUT_B_ONOFF:
				pipe = PIPE_B;
				break;
			case TRANS_DDI_EDP_INPUT_C_ONOFF:
				pipe = PIPE_C;
				break;
			default:
				/* A bogus value has been programmed, disable
				 * the transcoder */
				WARN(1, "Bogus eDP source %08x\n", tmp);
				intel_ddi_disable_transcoder_func(dev_priv,
								  TRANSCODER_EDP);
				goto setup_pipes;
			}

			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			crtc->config.cpu_transcoder = TRANSCODER_EDP;

			DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
				      pipe_name(pipe));
		}
	}

setup_pipes:
	list_for_each_entry(crtc, &dev->mode_config.crtc_list,
			    base.head) {
		enum transcoder tmp = crtc->config.cpu_transcoder;
		memset(&crtc->config, 0, sizeof(crtc->config));
		crtc->config.cpu_transcoder = tmp;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 &crtc->config);

		crtc->base.enabled = crtc->active;

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	if (IS_HASWELL(dev))
		intel_ddi_setup_hw_pll_state(dev);

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			encoder->base.crtc =
				dev_priv->pipe_to_crtc_mapping[pipe];
		} else {
			encoder->base.crtc = NULL;
		}

		encoder->connectors_active = false;
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
			      encoder->base.base.id,
			      drm_get_encoder_name(&encoder->base),
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe);
	}

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->encoder->connectors_active = true;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      drm_get_connector_name(&connector->base),
			      connector->base.encoder ? "enabled" : "disabled");
	}

	/* HW state is read out, now we need to sanitize this mess. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
	}

	if (force_restore) {
		/*
		 * We need to use raw interfaces for restoring state to avoid
		 * checking (bogus) intermediate states.
		 */
		for_each_pipe(pipe) {
			struct drm_crtc *crtc =
				dev_priv->pipe_to_crtc_mapping[pipe];

			__intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
					 crtc->fb);
		}
		list_for_each_entry(plane, &dev->mode_config.plane_list, head)
			intel_plane_restore(plane);

		i915_redisable_vga(dev);
	} else {
		intel_modeset_update_staged_output_state(dev);
	}

	intel_modeset_check_state(dev);

	drm_mode_config_reset(dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
{
	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	intel_modeset_setup_hw_state(dev, false);
}
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;

	drm_kms_helper_poll_fini(dev);
	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
			continue;

		intel_crtc = to_intel_crtc(crtc);
		intel_increase_pllclock(crtc);
	}

	intel_disable_fbc(dev);

	intel_disable_gt_powersave(dev);

	ironlake_teardown_rc6(dev);

	if (IS_VALLEYVIEW(dev))
		vlv_init_dpio(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Disable the irq before mode object teardown, for the irq might
	 * enqueue unpin/hotplug work. */
	drm_irq_uninstall(dev);
	cancel_work_sync(&dev_priv->hotplug_work);
	cancel_work_sync(&dev_priv->rps.work);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy backlight, if any, before the connectors */
	intel_panel_destroy_backlight(dev);

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);
}
/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}
/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 gmch_ctrl;

	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
	return 0;
}
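/* Display error state capture for debugfs: a snapshot of the cursor, plane
 * and pipe timing registers taken when a GPU error is recorded. */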
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>

struct intel_display_error_state {
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		u32 conf;
		u32 source;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];
};
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	enum transcoder cpu_transcoder;
	int i;

	error = kmalloc(sizeof(*error), M_DRM, M_WAITOK | M_NULLOK);
	if (error == NULL)
		return NULL;

	for_each_pipe(i) {
		cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);

		if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
			error->cursor[i].control = I915_READ(CURCNTR(i));
			error->cursor[i].position = I915_READ(CURPOS(i));
			error->cursor[i].base = I915_READ(CURBASE(i));
		} else {
			error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
			error->cursor[i].position = I915_READ(CURPOS_IVB(i));
			error->cursor[i].base = I915_READ(CURBASE_IVB(i));
		}

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->pipe[i].source = I915_READ(PIPESRC(i));
		error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
void
intel_display_print_error_state(struct seq_file *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	seq_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	for_each_pipe(i) {
		seq_printf(m, "Pipe [%d]:\n", i);
		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

		seq_printf(m, "Plane [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		seq_printf(m, "Cursor [%d]:\n", i);
		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);