drm/i915: Update to Linux 4.7.10
[dragonfly.git] sys/dev/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 */
#include <linux/i2c.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

static int disable_aux_irq = 0;
TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};
/*
 * CHV supports eDP 1.4, which has more link rates; the table below lists
 * only the fixed rates and excludes the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires the fractional divider to be programmed for m2;
	 * m2 is stored in fixed-point format using the formula
	 * (m2_int << 22) | m2_fraction.
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
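
/*
 * Worked example of the m2 encoding above (editorial arithmetic check, not
 * part of the original source): for the 162000 entry,
 * (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a, and the
 * fraction 1677722 / 2^22 ~= 0.4, i.e. m2 ~= 32.4.
 */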
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum i915_pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
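
/*
 * Illustration (editorial, not in the original source): for lane_count == 2,
 * (1 << 2) - 1 == 0b0011, so the unused-lane mask is ~0b0011 & 0xf == 0xc,
 * i.e. lanes 2 and 3 are the unused ones.
 */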
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u8 source_max, sink_max;

	source_max = intel_dig_port->max_lanes;
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
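
/*
 * Completing the worked example above (editorial, not in the original
 * source): intel_dp_link_required(119000, 18) == (2142000 + 9) / 10 ==
 * 214200 decakilobits, which fits on a single 2.7GHz lane since
 * intel_dp_max_data_rate(270000, 1) == 216000.
 */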
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
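
/*
 * Byte-order illustration for the pack/unpack helpers above (editorial, not
 * in the original source): intel_dp_pack_aux((const uint8_t []){ 0x12, 0x34 }, 2)
 * returns 0x12340000 - byte 0 lands in the most significant byte, matching
 * the big-endian layout of the AUX channel data registers.
 */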
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_aux_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
static enum i915_pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum i915_pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum i915_pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum i915_pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum i915_pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum i915_pipe pipe)
{
	return true;
}

static enum i915_pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum i915_pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}
/*
 * Reboot notifier handler to shut down panel power and so guarantee T12
 * timing.  Only applicable when the panel PM state is not to be tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

#if 0
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;
#endif

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value, divide by 2000, and use that.
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (index)
		return 0;

	/*
	 * The clock divider is based off the cdclk or PCH rawclk, and would
	 * like to run at 2MHz.  So, take the cdclk or PCH rawclk value,
	 * divide by 2000, and use that.
	 */
	if (intel_dig_port->port == PORT_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
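
/*
 * Worked example (editorial; assumes rawclk_freq is kept in kHz, as the
 * divide-by-2000 above implies): a 24 MHz PCH rawclk gives
 * DIV_ROUND_CLOSEST(24000, 2000) == 12, and 24 MHz / 12 == 2 MHz.
 */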
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}
static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
#ifdef __DragonFly__
	bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
#else
	bool has_aux_irq = HAS_AUX_IRQ(dev);
#endif
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But e.g. for i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/*
	 * dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
			 * a 400us delay is required for errors and timeouts.
			 * Timeout errors from the HW already meet this
			 * requirement, so skip to the next iteration.
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these.
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so that
	 * the drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organizes the retries at the drm level. There, EBUSY should
		 * also take care of the 1ms wait before retrying.
		 * That aux retry re-org is still needed, and once it is
		 * merged we can remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
		else
			WARN_ON(msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
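
/*
 * Header-layout illustration (editorial, not in the original source): a
 * one-byte native AUX read of DPCD address 0x00000 (DP_DPCD_REV) builds
 * txbuf[] = { 0x90, 0x00, 0x00, 0x00 } - the request DP_AUX_NATIVE_READ
 * (0x9) in the high nibble of byte 0, the 20-bit address across the low
 * nibble of byte 0 plus bytes 1-2, and (size - 1) in byte 3.
 */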
static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_CTL(port);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_DATA(port, index);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}
/*
 * On SKL we don't have Aux for port E so we rely
 * on VBT to set a proper alternate aux channel.
 */
static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
{
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[PORT_E];

	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		return PORT_A;
	case DP_AUX_B:
		return PORT_B;
	case DP_AUX_C:
		return PORT_C;
	case DP_AUX_D:
		return PORT_D;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		return PORT_A;
	}
}

static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	if (port == PORT_E)
		port = skl_porte_aux_port(dev_priv);

	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}
static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_ctl_reg(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_ctl_reg(dev_priv, port);
	else
		return g4x_aux_ctl_reg(dev_priv, port);
}

static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
				     enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_data_reg(dev_priv, port, index);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_data_reg(dev_priv, port, index);
	else
		return g4x_aux_data_reg(dev_priv, port, index);
}

static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}

static int
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	int ret;

	intel_aux_reg_init(intel_dp);

	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	if (!intel_dp->aux.name)
		return -ENOMEM;

	intel_dp->aux.dev = connector->base.kdev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

#if 0
	DRM_DEBUG_KMS("registering %s bus for %s\n",
		      intel_dp->aux.name,
		      connector->base.kdev->kobj.name);
#endif

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  intel_dp->aux.name, ret);
		kfree(intel_dp->aux.name);
		return ret;
	}

	return 0;
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	intel_dp_aux_fini(intel_dp);
	intel_connector_unregister(intel_connector);
}
static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}
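
/*
 * How the fallback return value maps link-bw codes to rate counts
 * (editorial, not in the original source): DP_LINK_BW_1_62 == 0x06, so
 * (0x06 >> 3) + 1 == 1 entry of default_rates; DP_LINK_BW_2_7 == 0x0a
 * gives 2 entries; DP_LINK_BW_5_4 == 0x14 gives all 3.
 */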
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;

	/* WaDisableHBR2:skl */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is the last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}
static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
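
/*
 * This is a classic two-pointer merge intersection and relies on both rate
 * arrays being sorted in ascending order.  Example (editorial, not in the
 * original source): source { 162000, 270000, 540000 } intersected with
 * sink { 162000, 216000, 270000 } yields { 162000, 270000 } and returns 2.
 */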
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}
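
/*
 * Why rate_to_index(0, rates) works here (editorial note): rates[] is
 * zero-initialized, so the first 0 entry sits immediately after the len
 * valid rates; rate_to_index(0, rates) therefore returns len, and
 * rates[len - 1] is the highest common rate, since the arrays are sorted
 * ascending.
 */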
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   uint8_t *link_bw, uint8_t *rate_select)
{
	if (intel_dp->num_sink_rates) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
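
/*
 * Example of the two signalling styles (editorial, not in the original
 * source): a sink without a DPCD rate table gets the legacy bandwidth code,
 * e.g. port_clock 270000 -> link_bw 0x0a (DP_LINK_BW_2_7) and
 * rate_select 0; a sink that advertises a rate table gets link_bw 0 and
 * rate_select set to the index of port_clock within sink_rates[].
 */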
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH_DISPLAY(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);
	/*
	 * Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between.
	 */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;
found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (!HAS_DDI(dev))
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register.
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable().
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value,
		      5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/*
	 * Take the difference of the current time and the panel power off
	 * time, and then make the panel wait for t11_t12 if needed.
	 */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/*
	 * When we disable the VDD override bit last, we have to do the manual
	 * wait.
	 */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
			intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
/*
 * Read the current pp_control value, unlocking the register if it
 * is locked.
 */
static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

#ifdef __DragonFly__
	/* XXX: limit dmesg spam to 16 warnings instead of 137, where is the bug? */
	if (!vdd)
		DRM_ERROR_RATELIMITED("eDP port %c VDD already requested on\n",
				      port_name(dp_to_dig_port(intel_dp)->port));
#else
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->port));
#endif
}
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}

static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1943 * Must be paired with edp_panel_vdd_on().
1944 * Must hold pps_mutex around the whole on/off sequence.
1945 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1947 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1949 struct drm_i915_private *dev_priv =
1950 intel_dp_to_dev(intel_dp)->dev_private;
1952 lockdep_assert_held(&dev_priv->pps_mutex);
1954 if (!is_edp(intel_dp))
1955 return;
1957 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1958 port_name(dp_to_dig_port(intel_dp)->port));
1960 intel_dp->want_panel_vdd = false;
1962 if (sync)
1963 edp_panel_vdd_off_sync(intel_dp);
1964 else
1965 edp_panel_vdd_schedule_off(intel_dp);
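/*
 * Drive the power sequencer to turn the panel on, honoring the
 * mandatory power cycle delay first. On gen5 the PANEL_POWER_RESET bit
 * is dropped around the sequence (ILK workaround) and restored once
 * the panel is up.
 */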
1968 static void edp_panel_on(struct intel_dp *intel_dp)
1970 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1971 struct drm_i915_private *dev_priv = dev->dev_private;
1972 u32 pp;
1973 i915_reg_t pp_ctrl_reg;
1975 lockdep_assert_held(&dev_priv->pps_mutex);
1977 if (!is_edp(intel_dp))
1978 return;
1980 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1981 port_name(dp_to_dig_port(intel_dp)->port));
1983 if (WARN(edp_have_panel_power(intel_dp),
1984 "eDP port %c panel power already on\n",
1985 port_name(dp_to_dig_port(intel_dp)->port)))
1986 return;
1988 wait_panel_power_cycle(intel_dp);
1990 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1991 pp = ironlake_get_pp_control(intel_dp);
1992 if (IS_GEN5(dev)) {
1993 /* ILK workaround: disable reset around power sequence */
1994 pp &= ~PANEL_POWER_RESET;
1995 I915_WRITE(pp_ctrl_reg, pp);
1996 POSTING_READ(pp_ctrl_reg);
1999 pp |= POWER_TARGET_ON;
2000 if (!IS_GEN5(dev))
2001 pp |= PANEL_POWER_RESET;
2003 I915_WRITE(pp_ctrl_reg, pp);
2004 POSTING_READ(pp_ctrl_reg);
2006 wait_panel_on(intel_dp);
2007 intel_dp->last_power_on = jiffies;
2009 if (IS_GEN5(dev)) {
2010 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2011 I915_WRITE(pp_ctrl_reg, pp);
2012 POSTING_READ(pp_ctrl_reg);
2016 void intel_edp_panel_on(struct intel_dp *intel_dp)
2018 if (!is_edp(intel_dp))
2019 return;
2021 pps_lock(intel_dp);
2022 edp_panel_on(intel_dp);
2023 pps_unlock(intel_dp);
2027 static void edp_panel_off(struct intel_dp *intel_dp)
2029 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2030 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2031 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2032 struct drm_i915_private *dev_priv = dev->dev_private;
2033 enum intel_display_power_domain power_domain;
2034 u32 pp;
2035 i915_reg_t pp_ctrl_reg;
2037 lockdep_assert_held(&dev_priv->pps_mutex);
2039 if (!is_edp(intel_dp))
2040 return;
2042 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2043 port_name(dp_to_dig_port(intel_dp)->port));
2045 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2046 port_name(dp_to_dig_port(intel_dp)->port));
2048 pp = ironlake_get_pp_control(intel_dp);
2049 /* We need to switch off panel power _and_ force vdd, because otherwise some
2050 * panels get very unhappy and cease to work. */
2051 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2052 EDP_BLC_ENABLE);
2054 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2056 intel_dp->want_panel_vdd = false;
2058 I915_WRITE(pp_ctrl_reg, pp);
2059 POSTING_READ(pp_ctrl_reg);
2061 intel_dp->panel_power_off_time = ktime_get_boottime();
2062 wait_panel_off(intel_dp);
2064 /* We got a reference when we enabled the VDD. */
2065 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2066 intel_display_power_put(dev_priv, power_domain);
2069 void intel_edp_panel_off(struct intel_dp *intel_dp)
2071 if (!is_edp(intel_dp))
2072 return;
2074 pps_lock(intel_dp);
2075 edp_panel_off(intel_dp);
2076 pps_unlock(intel_dp);
2079 /* Enable backlight in the panel power control. */
2080 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2082 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2083 struct drm_device *dev = intel_dig_port->base.base.dev;
2084 struct drm_i915_private *dev_priv = dev->dev_private;
2085 u32 pp;
2086 i915_reg_t pp_ctrl_reg;
2089 * If we enable the backlight right away following a panel power
2090 * on, we may see slight flicker as the panel syncs with the eDP
2091 * link. So delay a bit to make sure the image is solid before
2092 * allowing it to appear.
2094 wait_backlight_on(intel_dp);
2096 pps_lock(intel_dp);
2098 pp = ironlake_get_pp_control(intel_dp);
2099 pp |= EDP_BLC_ENABLE;
2101 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2103 I915_WRITE(pp_ctrl_reg, pp);
2104 POSTING_READ(pp_ctrl_reg);
2106 pps_unlock(intel_dp);
2109 /* Enable backlight PWM and backlight PP control. */
2110 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2112 if (!is_edp(intel_dp))
2113 return;
2115 DRM_DEBUG_KMS("\n");
2117 intel_panel_enable_backlight(intel_dp->attached_connector);
2118 _intel_edp_backlight_on(intel_dp);
2121 /* Disable backlight in the panel power control. */
2122 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2124 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2125 struct drm_i915_private *dev_priv = dev->dev_private;
2126 u32 pp;
2127 i915_reg_t pp_ctrl_reg;
2129 if (!is_edp(intel_dp))
2130 return;
2132 pps_lock(intel_dp);
2134 pp = ironlake_get_pp_control(intel_dp);
2135 pp &= ~EDP_BLC_ENABLE;
2137 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2139 I915_WRITE(pp_ctrl_reg, pp);
2140 POSTING_READ(pp_ctrl_reg);
2142 pps_unlock(intel_dp);
2144 intel_dp->last_backlight_off = jiffies;
2145 edp_wait_backlight_off(intel_dp);
2148 /* Disable backlight PP control and backlight PWM. */
2149 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2151 if (!is_edp(intel_dp))
2152 return;
2154 DRM_DEBUG_KMS("\n");
2156 _intel_edp_backlight_off(intel_dp);
2157 intel_panel_disable_backlight(intel_dp->attached_connector);
2161 * Hook for controlling the panel power control backlight through the bl_power
2162 * sysfs attribute. Take care to handle multiple calls.
2164 static void intel_edp_backlight_power(struct intel_connector *connector,
2165 bool enable)
2167 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2168 bool is_enabled;
2170 pps_lock(intel_dp);
2171 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2172 pps_unlock(intel_dp);
2174 if (is_enabled == enable)
2175 return;
2177 DRM_DEBUG_KMS("panel power control backlight %s\n",
2178 enable ? "enable" : "disable");
2180 if (enable)
2181 _intel_edp_backlight_on(intel_dp);
2182 else
2183 _intel_edp_backlight_off(intel_dp);
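/*
 * Hardware state assertions: warn if the DP port enable bit or the
 * eDP PLL enable bit doesn't match what the caller expects.
 */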
2186 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2188 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2189 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2190 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2192 I915_STATE_WARN(cur_state != state,
2193 "DP port %c state assertion failure (expected %s, current %s)\n",
2194 port_name(dig_port->port),
2195 onoff(state), onoff(cur_state));
2197 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2199 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2201 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2203 I915_STATE_WARN(cur_state != state,
2204 "eDP PLL state assertion failure (expected %s, current %s)\n",
2205 onoff(state), onoff(cur_state));
2207 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2208 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
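/*
 * Enable the eDP PLL on port A. Pipe, port and PLL must all still be
 * disabled at this point; the PLL frequency (162 or 270 MHz) is picked
 * from the crtc's port clock before the enable bit is set.
 */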
2210 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2212 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2213 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2214 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2216 assert_pipe_disabled(dev_priv, crtc->pipe);
2217 assert_dp_port_disabled(intel_dp);
2218 assert_edp_pll_disabled(dev_priv);
2220 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2221 crtc->config->port_clock);
2223 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2225 if (crtc->config->port_clock == 162000)
2226 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2227 else
2228 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2230 I915_WRITE(DP_A, intel_dp->DP);
2231 POSTING_READ(DP_A);
2232 udelay(500);
2235 * [DevILK] Workaround required when enabling DP PLL
2236 * while a pipe is enabled going to FDI:
2237 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2238 * 2. Program DP PLL enable
2240 if (IS_GEN5(dev_priv))
2241 intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe);
2243 intel_dp->DP |= DP_PLL_ENABLE;
2245 I915_WRITE(DP_A, intel_dp->DP);
2246 POSTING_READ(DP_A);
2247 udelay(200);
2250 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2252 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2253 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2254 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2256 assert_pipe_disabled(dev_priv, crtc->pipe);
2257 assert_dp_port_disabled(intel_dp);
2258 assert_edp_pll_enabled(dev_priv);
2260 DRM_DEBUG_KMS("disabling eDP PLL\n");
2262 intel_dp->DP &= ~DP_PLL_ENABLE;
2264 I915_WRITE(DP_A, intel_dp->DP);
2265 POSTING_READ(DP_A);
2266 udelay(200);
2269 /* If the sink supports it, try to set the power state appropriately */
2270 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2272 int ret, i;
2274 /* Should have a valid DPCD by this point */
2275 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2276 return;
2278 if (mode != DRM_MODE_DPMS_ON) {
2279 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2280 DP_SET_POWER_D3);
2281 } else {
2283 * When turning on, retry the write a few times, waiting 1 ms
2284 * between attempts, to give the sink time to wake up.
2286 for (i = 0; i < 3; i++) {
2287 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2288 DP_SET_POWER_D0);
2289 if (ret == 1)
2290 break;
2291 msleep(1);
2295 if (ret != 1)
2296 DRM_DEBUG_KMS("failed to %s sink power state\n",
2297 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
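/*
 * ->get_hw_state hook: report whether the port is enabled and, if so,
 * which pipe feeds it. On CPT the pipe<->port mapping has to be dug
 * out of the TRANS_DP_CTL registers rather than the port register.
 */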
2300 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2301 enum i915_pipe *pipe)
2303 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2304 enum port port = dp_to_dig_port(intel_dp)->port;
2305 struct drm_device *dev = encoder->base.dev;
2306 struct drm_i915_private *dev_priv = dev->dev_private;
2307 enum intel_display_power_domain power_domain;
2308 u32 tmp;
2309 bool ret;
2311 power_domain = intel_display_port_power_domain(encoder);
2312 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
2313 return false;
2315 ret = false;
2317 tmp = I915_READ(intel_dp->output_reg);
2319 if (!(tmp & DP_PORT_EN))
2320 goto out;
2322 if (IS_GEN7(dev) && port == PORT_A) {
2323 *pipe = PORT_TO_PIPE_CPT(tmp);
2324 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2325 enum i915_pipe p;
2327 for_each_pipe(dev_priv, p) {
2328 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2329 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2330 *pipe = p;
2331 ret = true;
2333 goto out;
2337 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2338 i915_mmio_reg_offset(intel_dp->output_reg));
2339 } else if (IS_CHERRYVIEW(dev)) {
2340 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2341 } else {
2342 *pipe = PORT_TO_PIPE(tmp);
2345 ret = true;
2347 out:
2348 intel_display_power_put(dev_priv, power_domain);
2350 return ret;
2353 static void intel_dp_get_config(struct intel_encoder *encoder,
2354 struct intel_crtc_state *pipe_config)
2356 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2357 u32 tmp, flags = 0;
2358 struct drm_device *dev = encoder->base.dev;
2359 struct drm_i915_private *dev_priv = dev->dev_private;
2360 enum port port = dp_to_dig_port(intel_dp)->port;
2361 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2363 tmp = I915_READ(intel_dp->output_reg);
2365 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2367 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2368 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2370 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2371 flags |= DRM_MODE_FLAG_PHSYNC;
2372 else
2373 flags |= DRM_MODE_FLAG_NHSYNC;
2375 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2376 flags |= DRM_MODE_FLAG_PVSYNC;
2377 else
2378 flags |= DRM_MODE_FLAG_NVSYNC;
2379 } else {
2380 if (tmp & DP_SYNC_HS_HIGH)
2381 flags |= DRM_MODE_FLAG_PHSYNC;
2382 else
2383 flags |= DRM_MODE_FLAG_NHSYNC;
2385 if (tmp & DP_SYNC_VS_HIGH)
2386 flags |= DRM_MODE_FLAG_PVSYNC;
2387 else
2388 flags |= DRM_MODE_FLAG_NVSYNC;
2391 pipe_config->base.adjusted_mode.flags |= flags;
2393 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2394 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
2395 pipe_config->limited_color_range = true;
2397 pipe_config->has_dp_encoder = true;
2399 pipe_config->lane_count =
2400 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2402 intel_dp_get_m_n(crtc, pipe_config);
2404 if (port == PORT_A) {
2405 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2406 pipe_config->port_clock = 162000;
2407 else
2408 pipe_config->port_clock = 270000;
2411 pipe_config->base.adjusted_mode.crtc_clock =
2412 intel_dotclock_calculate(pipe_config->port_clock,
2413 &pipe_config->dp_m_n);
2415 if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
2416 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
2418 * This is a big fat ugly hack.
2420 * Some machines in UEFI boot mode provide us a VBT that has 18
2421 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2422 * unknown we fail to light up. Yet the same BIOS boots up with
2423 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2424 * max, not what it tells us to use.
2426 * Note: This will still be broken if the eDP panel is not lit
2427 * up by the BIOS, and thus we can't get the mode at module
2428 * load.
2430 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2431 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
2432 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
2436 static void intel_disable_dp(struct intel_encoder *encoder)
2438 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2439 struct drm_device *dev = encoder->base.dev;
2440 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2442 if (crtc->config->has_audio)
2443 intel_audio_codec_disable(encoder);
2445 if (HAS_PSR(dev) && !HAS_DDI(dev))
2446 intel_psr_disable(intel_dp);
2448 /* Make sure the panel is off before trying to change the mode. But also
2449 * ensure that we have vdd while we switch off the panel. */
2450 intel_edp_panel_vdd_on(intel_dp);
2451 intel_edp_backlight_off(intel_dp);
2452 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2453 intel_edp_panel_off(intel_dp);
2455 /* disable the port before the pipe on g4x */
2456 if (INTEL_INFO(dev)->gen < 5)
2457 intel_dp_link_down(intel_dp);
2460 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2462 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2463 enum port port = dp_to_dig_port(intel_dp)->port;
2465 intel_dp_link_down(intel_dp);
2467 /* Only ilk+ has port A */
2468 if (port == PORT_A)
2469 ironlake_edp_pll_off(intel_dp);
2472 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2474 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2476 intel_dp_link_down(intel_dp);
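/*
 * Put the PCS data lanes into (reset=true) or take them out of
 * (reset=false) soft reset via sideband writes, touching the second
 * lane pair only when more than two lanes are in use.
 */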
2479 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2480 bool reset)
2482 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2483 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2484 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2485 enum i915_pipe pipe = crtc->pipe;
2486 uint32_t val;
2488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2489 if (reset)
2490 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2491 else
2492 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2493 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2495 if (crtc->config->lane_count > 2) {
2496 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2497 if (reset)
2498 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2499 else
2500 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2501 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2504 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2505 val |= CHV_PCS_REQ_SOFTRESET_EN;
2506 if (reset)
2507 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2508 else
2509 val |= DPIO_PCS_CLK_SOFT_RESET;
2510 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2512 if (crtc->config->lane_count > 2) {
2513 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2514 val |= CHV_PCS_REQ_SOFTRESET_EN;
2515 if (reset)
2516 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2517 else
2518 val |= DPIO_PCS_CLK_SOFT_RESET;
2519 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2523 static void chv_post_disable_dp(struct intel_encoder *encoder)
2525 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2526 struct drm_device *dev = encoder->base.dev;
2527 struct drm_i915_private *dev_priv = dev->dev_private;
2529 intel_dp_link_down(intel_dp);
2531 mutex_lock(&dev_priv->sb_lock);
2533 /* Assert data lane reset */
2534 chv_data_lane_soft_reset(encoder, true);
2536 mutex_unlock(&dev_priv->sb_lock);
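/*
 * Translate a DP training pattern into the platform's register layout:
 * DP_TP_CTL on DDI, the CPT variant on PCH ports and gen7 port A, and
 * the classic bits otherwise. Only the DDI path writes the register
 * itself; the other paths merely update *DP for the caller to write.
 */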
2539 static void
2540 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2541 uint32_t *DP,
2542 uint8_t dp_train_pat)
2544 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2545 struct drm_device *dev = intel_dig_port->base.base.dev;
2546 struct drm_i915_private *dev_priv = dev->dev_private;
2547 enum port port = intel_dig_port->port;
2549 if (HAS_DDI(dev)) {
2550 uint32_t temp = I915_READ(DP_TP_CTL(port));
2552 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2553 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2554 else
2555 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2557 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2558 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2559 case DP_TRAINING_PATTERN_DISABLE:
2560 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2562 break;
2563 case DP_TRAINING_PATTERN_1:
2564 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2565 break;
2566 case DP_TRAINING_PATTERN_2:
2567 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2568 break;
2569 case DP_TRAINING_PATTERN_3:
2570 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2571 break;
2573 I915_WRITE(DP_TP_CTL(port), temp);
2575 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2576 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2577 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2579 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2580 case DP_TRAINING_PATTERN_DISABLE:
2581 *DP |= DP_LINK_TRAIN_OFF_CPT;
2582 break;
2583 case DP_TRAINING_PATTERN_1:
2584 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2585 break;
2586 case DP_TRAINING_PATTERN_2:
2587 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2588 break;
2589 case DP_TRAINING_PATTERN_3:
2590 DRM_ERROR("DP training pattern 3 not supported\n");
2591 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2592 break;
2595 } else {
2596 if (IS_CHERRYVIEW(dev))
2597 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2598 else
2599 *DP &= ~DP_LINK_TRAIN_MASK;
2601 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2602 case DP_TRAINING_PATTERN_DISABLE:
2603 *DP |= DP_LINK_TRAIN_OFF;
2604 break;
2605 case DP_TRAINING_PATTERN_1:
2606 *DP |= DP_LINK_TRAIN_PAT_1;
2607 break;
2608 case DP_TRAINING_PATTERN_2:
2609 *DP |= DP_LINK_TRAIN_PAT_2;
2610 break;
2611 case DP_TRAINING_PATTERN_3:
2612 if (IS_CHERRYVIEW(dev)) {
2613 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2614 } else {
2615 DRM_ERROR("DP training pattern 3 not supported\n");
2616 *DP |= DP_LINK_TRAIN_PAT_2;
2618 break;
2623 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2625 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2626 struct drm_i915_private *dev_priv = dev->dev_private;
2627 struct intel_crtc *crtc =
2628 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2630 /* enable with pattern 1 (as per spec) */
2631 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2632 DP_TRAINING_PATTERN_1);
2634 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2635 POSTING_READ(intel_dp->output_reg);
2638 * Magic for VLV/CHV. We _must_ first set up the register
2639 * without actually enabling the port, and then do another
2640 * write to enable the port. Otherwise link training will
2641 * fail when the power sequencer is freshly used for this port.
2643 intel_dp->DP |= DP_PORT_EN;
2644 if (crtc->config->has_audio)
2645 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2647 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2648 POSTING_READ(intel_dp->output_reg);
2651 static void intel_enable_dp(struct intel_encoder *encoder)
2653 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2654 struct drm_device *dev = encoder->base.dev;
2655 struct drm_i915_private *dev_priv = dev->dev_private;
2656 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2657 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2658 enum i915_pipe pipe = crtc->pipe;
2660 if (WARN_ON(dp_reg & DP_PORT_EN))
2661 return;
2663 pps_lock(intel_dp);
2665 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
2666 vlv_init_panel_power_sequencer(intel_dp);
2668 intel_dp_enable_port(intel_dp);
2670 edp_panel_vdd_on(intel_dp);
2671 edp_panel_on(intel_dp);
2672 edp_panel_vdd_off(intel_dp, true);
2674 pps_unlock(intel_dp);
2676 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
2677 unsigned int lane_mask = 0x0;
2679 if (IS_CHERRYVIEW(dev))
2680 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2682 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2683 lane_mask);
2686 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2687 intel_dp_start_link_train(intel_dp);
2688 intel_dp_stop_link_train(intel_dp);
2690 if (crtc->config->has_audio) {
2691 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2692 pipe_name(pipe));
2693 intel_audio_codec_enable(encoder);
2697 static void g4x_enable_dp(struct intel_encoder *encoder)
2699 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2701 intel_enable_dp(encoder);
2702 intel_edp_backlight_on(intel_dp);
2705 static void vlv_enable_dp(struct intel_encoder *encoder)
2707 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2709 intel_edp_backlight_on(intel_dp);
2710 intel_psr_enable(intel_dp);
2713 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2715 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2716 enum port port = dp_to_dig_port(intel_dp)->port;
2718 intel_dp_prepare(encoder);
2720 /* Only ilk+ has port A */
2721 if (port == PORT_A)
2722 ironlake_edp_pll_on(intel_dp);
2725 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2727 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2728 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2729 enum i915_pipe pipe = intel_dp->pps_pipe;
2730 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2732 edp_panel_vdd_off_sync(intel_dp);
2735 * VLV seems to get confused when multiple power sequencers
2736 * have the same port selected (even if only one has power/vdd
2737 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2738 * CHV on the other hand doesn't seem to mind having the same port
2739 * selected in multiple power sequencers, but let's clear the
2740 * port select always when logically disconnecting a power sequencer
2741 * from a port.
2743 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2744 pipe_name(pipe), port_name(intel_dig_port->port));
2745 I915_WRITE(pp_on_reg, 0);
2746 POSTING_READ(pp_on_reg);
2748 intel_dp->pps_pipe = INVALID_PIPE;
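/*
 * Walk all eDP encoders and detach any that currently own the given
 * pipe's power sequencer so that the caller can take it over. VDD is
 * switched off in the process via vlv_detach_power_sequencer().
 */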
2751 static void vlv_steal_power_sequencer(struct drm_device *dev,
2752 enum i915_pipe pipe)
2754 struct drm_i915_private *dev_priv = dev->dev_private;
2755 struct intel_encoder *encoder;
2757 lockdep_assert_held(&dev_priv->pps_mutex);
2759 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2760 return;
2762 for_each_intel_encoder(dev, encoder) {
2763 struct intel_dp *intel_dp;
2764 enum port port;
2766 if (encoder->type != INTEL_OUTPUT_EDP)
2767 continue;
2769 intel_dp = enc_to_intel_dp(&encoder->base);
2770 port = dp_to_dig_port(intel_dp)->port;
2772 if (intel_dp->pps_pipe != pipe)
2773 continue;
2775 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2776 pipe_name(pipe), port_name(port));
2778 WARN(encoder->base.crtc,
2779 "stealing pipe %c power sequencer from active eDP port %c\n",
2780 pipe_name(pipe), port_name(port));
2782 /* make sure vdd is off before we steal it */
2783 vlv_detach_power_sequencer(intel_dp);
2787 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2789 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2790 struct intel_encoder *encoder = &intel_dig_port->base;
2791 struct drm_device *dev = encoder->base.dev;
2792 struct drm_i915_private *dev_priv = dev->dev_private;
2793 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2795 lockdep_assert_held(&dev_priv->pps_mutex);
2797 if (!is_edp(intel_dp))
2798 return;
2800 if (intel_dp->pps_pipe == crtc->pipe)
2801 return;
2804 * If another power sequencer was being used on this
2805 * port previously make sure to turn off vdd there while
2806 * we still have control of it.
2808 if (intel_dp->pps_pipe != INVALID_PIPE)
2809 vlv_detach_power_sequencer(intel_dp);
2812 * We may be stealing the power
2813 * sequencer from another port.
2815 vlv_steal_power_sequencer(dev, crtc->pipe);
2817 /* now it's all ours */
2818 intel_dp->pps_pipe = crtc->pipe;
2820 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2821 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2823 /* init power sequencer on this pipe and port */
2824 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2825 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2828 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2830 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2831 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2832 struct drm_device *dev = encoder->base.dev;
2833 struct drm_i915_private *dev_priv = dev->dev_private;
2834 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2835 enum dpio_channel port = vlv_dport_to_channel(dport);
2836 int pipe = intel_crtc->pipe;
2837 u32 val;
2839 mutex_lock(&dev_priv->sb_lock);
2841 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2842 val = 0;
2843 if (pipe)
2844 val |= (1<<21);
2845 else
2846 val &= ~(1<<21);
2847 val |= 0x001000c4;
2848 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2849 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2850 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2852 mutex_unlock(&dev_priv->sb_lock);
2854 intel_enable_dp(encoder);
2857 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2859 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2860 struct drm_device *dev = encoder->base.dev;
2861 struct drm_i915_private *dev_priv = dev->dev_private;
2862 struct intel_crtc *intel_crtc =
2863 to_intel_crtc(encoder->base.crtc);
2864 enum dpio_channel port = vlv_dport_to_channel(dport);
2865 int pipe = intel_crtc->pipe;
2867 intel_dp_prepare(encoder);
2869 /* Program Tx lane resets to default */
2870 mutex_lock(&dev_priv->sb_lock);
2871 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2872 DPIO_PCS_TX_LANE2_RESET |
2873 DPIO_PCS_TX_LANE1_RESET);
2874 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2875 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2876 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2877 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2878 DPIO_PCS_CLK_SOFT_RESET);
2880 /* Fix up inter-pair skew failure */
2881 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2882 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2883 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2884 mutex_unlock(&dev_priv->sb_lock);
2887 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2889 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2890 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2891 struct drm_device *dev = encoder->base.dev;
2892 struct drm_i915_private *dev_priv = dev->dev_private;
2893 struct intel_crtc *intel_crtc =
2894 to_intel_crtc(encoder->base.crtc);
2895 enum dpio_channel ch = vlv_dport_to_channel(dport);
2896 int pipe = intel_crtc->pipe;
2897 int data, i, stagger;
2898 u32 val;
2900 mutex_lock(&dev_priv->sb_lock);
2902 /* allow hardware to manage TX FIFO reset source */
2903 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2904 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2905 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2907 if (intel_crtc->config->lane_count > 2) {
2908 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2909 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2910 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2913 /* Program Tx lane latency optimal setting */
2914 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2915 /* Set the upar bit */
2916 if (intel_crtc->config->lane_count == 1)
2917 data = 0x0;
2918 else
2919 data = (i == 1) ? 0x0 : 0x1;
2920 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2921 data << DPIO_UPAR_SHIFT);
2924 /* Data lane stagger programming */
2925 if (intel_crtc->config->port_clock > 270000)
2926 stagger = 0x18;
2927 else if (intel_crtc->config->port_clock > 135000)
2928 stagger = 0xd;
2929 else if (intel_crtc->config->port_clock > 67500)
2930 stagger = 0x7;
2931 else if (intel_crtc->config->port_clock > 33750)
2932 stagger = 0x4;
2933 else
2934 stagger = 0x2;
2936 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2937 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2938 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2940 if (intel_crtc->config->lane_count > 2) {
2941 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2942 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2943 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2946 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2947 DPIO_LANESTAGGER_STRAP(stagger) |
2948 DPIO_LANESTAGGER_STRAP_OVRD |
2949 DPIO_TX1_STAGGER_MASK(0x1f) |
2950 DPIO_TX1_STAGGER_MULT(6) |
2951 DPIO_TX2_STAGGER_MULT(0));
2953 if (intel_crtc->config->lane_count > 2) {
2954 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2955 DPIO_LANESTAGGER_STRAP(stagger) |
2956 DPIO_LANESTAGGER_STRAP_OVRD |
2957 DPIO_TX1_STAGGER_MASK(0x1f) |
2958 DPIO_TX1_STAGGER_MULT(7) |
2959 DPIO_TX2_STAGGER_MULT(5));
2962 /* Deassert data lane reset */
2963 chv_data_lane_soft_reset(encoder, false);
2965 mutex_unlock(&dev_priv->sb_lock);
2967 intel_enable_dp(encoder);
2969 /* Second common lane will stay alive on its own now */
2970 if (dport->release_cl2_override) {
2971 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2972 dport->release_cl2_override = false;
2976 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2978 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2979 struct drm_device *dev = encoder->base.dev;
2980 struct drm_i915_private *dev_priv = dev->dev_private;
2981 struct intel_crtc *intel_crtc =
2982 to_intel_crtc(encoder->base.crtc);
2983 enum dpio_channel ch = vlv_dport_to_channel(dport);
2984 enum i915_pipe pipe = intel_crtc->pipe;
2985 unsigned int lane_mask =
2986 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2987 u32 val;
2989 intel_dp_prepare(encoder);
2992 * Must trick the second common lane into life.
2993 * Otherwise we can't even access the PLL.
2995 if (ch == DPIO_CH0 && pipe == PIPE_B)
2996 dport->release_cl2_override =
2997 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2999 chv_phy_powergate_lanes(encoder, true, lane_mask);
3001 mutex_lock(&dev_priv->sb_lock);
3003 /* Assert data lane reset */
3004 chv_data_lane_soft_reset(encoder, true);
3006 /* program left/right clock distribution */
3007 if (pipe != PIPE_B) {
3008 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3009 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3010 if (ch == DPIO_CH0)
3011 val |= CHV_BUFLEFTENA1_FORCE;
3012 if (ch == DPIO_CH1)
3013 val |= CHV_BUFRIGHTENA1_FORCE;
3014 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3015 } else {
3016 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3017 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3018 if (ch == DPIO_CH0)
3019 val |= CHV_BUFLEFTENA2_FORCE;
3020 if (ch == DPIO_CH1)
3021 val |= CHV_BUFRIGHTENA2_FORCE;
3022 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3025 /* program clock channel usage */
3026 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3027 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3028 if (pipe != PIPE_B)
3029 val &= ~CHV_PCS_USEDCLKCHANNEL;
3030 else
3031 val |= CHV_PCS_USEDCLKCHANNEL;
3032 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3034 if (intel_crtc->config->lane_count > 2) {
3035 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3036 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3037 if (pipe != PIPE_B)
3038 val &= ~CHV_PCS_USEDCLKCHANNEL;
3039 else
3040 val |= CHV_PCS_USEDCLKCHANNEL;
3041 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3045 * This is a bit weird since generally CL
3046 * matches the pipe, but here we need to
3047 * pick the CL based on the port.
3049 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3050 if (pipe != PIPE_B)
3051 val &= ~CHV_CMN_USEDCLKCHANNEL;
3052 else
3053 val |= CHV_CMN_USEDCLKCHANNEL;
3054 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3056 mutex_unlock(&dev_priv->sb_lock);
3059 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3061 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3062 enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3063 u32 val;
3065 mutex_lock(&dev_priv->sb_lock);
3067 /* disable left/right clock distribution */
3068 if (pipe != PIPE_B) {
3069 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3070 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3071 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3072 } else {
3073 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3074 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3075 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3078 mutex_unlock(&dev_priv->sb_lock);
3081 * Leave the power down bit cleared for at least one
3082 * lane so that chv_phy_powergate_ch() will power
3083 * on something when the channel is otherwise unused.
3084 * When the port is off and the override is removed
3085 * the lanes power down anyway, so otherwise it doesn't
3086 * really matter what the state of power down bits is
3087 * after this.
3089 chv_phy_powergate_lanes(encoder, false, 0x0);
3093 * Fetch AUX CH registers 0x202 - 0x207 which contain
3094 * link status information
3096 bool
3097 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3099 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3100 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3103 /* These are source-specific values. */
3104 uint8_t
3105 intel_dp_voltage_max(struct intel_dp *intel_dp)
3107 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3108 struct drm_i915_private *dev_priv = dev->dev_private;
3109 enum port port = dp_to_dig_port(intel_dp)->port;
3111 if (IS_BROXTON(dev))
3112 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3113 else if (INTEL_INFO(dev)->gen >= 9) {
3114 if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
3115 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3116 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3117 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3118 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3119 else if (IS_GEN7(dev) && port == PORT_A)
3120 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3121 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3122 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3123 else
3124 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3127 uint8_t
3128 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3130 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3131 enum port port = dp_to_dig_port(intel_dp)->port;
3133 if (INTEL_INFO(dev)->gen >= 9) {
3134 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3135 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3136 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3137 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3138 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3140 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3143 default:
3144 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3146 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3147 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3148 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3149 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3150 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3151 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3153 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3154 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3155 default:
3156 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3158 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3159 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3160 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3161 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3162 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3163 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3164 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3165 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3166 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3167 default:
3168 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3170 } else if (IS_GEN7(dev) && port == PORT_A) {
3171 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3172 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3173 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3174 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3175 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3176 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3177 default:
3178 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3180 } else {
3181 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3182 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3183 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3184 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3185 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3186 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3187 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3188 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3189 default:
3190 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3195 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3197 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3198 struct drm_i915_private *dev_priv = dev->dev_private;
3199 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3200 struct intel_crtc *intel_crtc =
3201 to_intel_crtc(dport->base.base.crtc);
3202 unsigned long demph_reg_value, preemph_reg_value,
3203 uniqtranscale_reg_value;
3204 uint8_t train_set = intel_dp->train_set[0];
3205 enum dpio_channel port = vlv_dport_to_channel(dport);
3206 int pipe = intel_crtc->pipe;
3208 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3209 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3210 preemph_reg_value = 0x0004000;
3211 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3212 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3213 demph_reg_value = 0x2B405555;
3214 uniqtranscale_reg_value = 0x552AB83A;
3215 break;
3216 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3217 demph_reg_value = 0x2B404040;
3218 uniqtranscale_reg_value = 0x5548B83A;
3219 break;
3220 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3221 demph_reg_value = 0x2B245555;
3222 uniqtranscale_reg_value = 0x5560B83A;
3223 break;
3224 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3225 demph_reg_value = 0x2B405555;
3226 uniqtranscale_reg_value = 0x5598DA3A;
3227 break;
3228 default:
3229 return 0;
3231 break;
3232 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3233 preemph_reg_value = 0x0002000;
3234 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3235 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3236 demph_reg_value = 0x2B404040;
3237 uniqtranscale_reg_value = 0x5552B83A;
3238 break;
3239 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3240 demph_reg_value = 0x2B404848;
3241 uniqtranscale_reg_value = 0x5580B83A;
3242 break;
3243 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3244 demph_reg_value = 0x2B404040;
3245 uniqtranscale_reg_value = 0x55ADDA3A;
3246 break;
3247 default:
3248 return 0;
3250 break;
3251 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3252 preemph_reg_value = 0x0000000;
3253 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3254 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3255 demph_reg_value = 0x2B305555;
3256 uniqtranscale_reg_value = 0x5570B83A;
3257 break;
3258 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3259 demph_reg_value = 0x2B2B4040;
3260 uniqtranscale_reg_value = 0x55ADDA3A;
3261 break;
3262 default:
3263 return 0;
3265 break;
3266 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3267 preemph_reg_value = 0x0006000;
3268 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3270 demph_reg_value = 0x1B405555;
3271 uniqtranscale_reg_value = 0x55ADDA3A;
3272 break;
3273 default:
3274 return 0;
3276 break;
3277 default:
3278 return 0;
3281 mutex_lock(&dev_priv->sb_lock);
3282 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3283 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3284 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3285 uniqtranscale_reg_value);
3286 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3287 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3288 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3289 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3290 mutex_unlock(&dev_priv->sb_lock);
3292 return 0;
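/*
 * The unique transition scale appears to be needed only for the
 * maximum voltage swing (level 3) combined with no pre-emphasis
 * (level 0).
 */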
3295 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3297 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3298 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3301 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3303 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3304 struct drm_i915_private *dev_priv = dev->dev_private;
3305 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3306 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3307 u32 deemph_reg_value, margin_reg_value, val;
3308 uint8_t train_set = intel_dp->train_set[0];
3309 enum dpio_channel ch = vlv_dport_to_channel(dport);
3310 enum i915_pipe pipe = intel_crtc->pipe;
3311 int i;
3313 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3314 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3315 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3317 deemph_reg_value = 128;
3318 margin_reg_value = 52;
3319 break;
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3321 deemph_reg_value = 128;
3322 margin_reg_value = 77;
3323 break;
3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3325 deemph_reg_value = 128;
3326 margin_reg_value = 102;
3327 break;
3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3329 deemph_reg_value = 128;
3330 margin_reg_value = 154;
3331 /* FIXME extra to set for 1200 */
3332 break;
3333 default:
3334 return 0;
3336 break;
3337 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3338 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3340 deemph_reg_value = 85;
3341 margin_reg_value = 78;
3342 break;
3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3344 deemph_reg_value = 85;
3345 margin_reg_value = 116;
3346 break;
3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3348 deemph_reg_value = 85;
3349 margin_reg_value = 154;
3350 break;
3351 default:
3352 return 0;
3354 break;
3355 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3356 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3357 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3358 deemph_reg_value = 64;
3359 margin_reg_value = 104;
3360 break;
3361 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3362 deemph_reg_value = 64;
3363 margin_reg_value = 154;
3364 break;
3365 default:
3366 return 0;
3368 break;
3369 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3370 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3371 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3372 deemph_reg_value = 43;
3373 margin_reg_value = 154;
3374 break;
3375 default:
3376 return 0;
3378 break;
3379 default:
3380 return 0;
3383 mutex_lock(&dev_priv->sb_lock);
3385 /* Clear calc init */
3386 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3387 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3388 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3389 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3390 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3392 if (intel_crtc->config->lane_count > 2) {
3393 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3394 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3395 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3396 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3397 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3400 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3401 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3402 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3403 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3405 if (intel_crtc->config->lane_count > 2) {
3406 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3407 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3408 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3409 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3412 /* Program swing deemph */
3413 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3414 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3415 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3416 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3417 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3420 /* Program swing margin */
3421 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3422 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3424 val &= ~DPIO_SWING_MARGIN000_MASK;
3425 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3428 * Supposedly this value shouldn't matter when unique transition
3429 * scale is disabled, but in fact it does matter. Let's just
3430 * always program the same value and hope it's OK.
3432 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3433 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3435 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3439 * The document said it needs to set bit 27 for ch0 and bit 26
3440 * for ch1. Might be a typo in the doc.
3441 * For now, for this unique transition scale selection, set bit
3442 * 27 for ch0 and ch1.
3444 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3445 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3446 if (chv_need_uniq_trans_scale(train_set))
3447 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3448 else
3449 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3450 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3453 /* Start swing calculation */
3454 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3455 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3456 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3458 if (intel_crtc->config->lane_count > 2) {
3459 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3460 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3461 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3464 mutex_unlock(&dev_priv->sb_lock);
3466 return 0;
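/*
 * Translate the voltage swing and pre-emphasis levels in train_set
 * into gen4 DP port register bits, falling back to the lowest setting
 * for unrecognized values.
 */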
3469 static uint32_t
3470 gen4_signal_levels(uint8_t train_set)
3472 uint32_t signal_levels = 0;
3474 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3476 default:
3477 signal_levels |= DP_VOLTAGE_0_4;
3478 break;
3479 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3480 signal_levels |= DP_VOLTAGE_0_6;
3481 break;
3482 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3483 signal_levels |= DP_VOLTAGE_0_8;
3484 break;
3485 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3486 signal_levels |= DP_VOLTAGE_1_2;
3487 break;
3489 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3490 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3491 default:
3492 signal_levels |= DP_PRE_EMPHASIS_0;
3493 break;
3494 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3495 signal_levels |= DP_PRE_EMPHASIS_3_5;
3496 break;
3497 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3498 signal_levels |= DP_PRE_EMPHASIS_6;
3499 break;
3500 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3501 signal_levels |= DP_PRE_EMPHASIS_9_5;
3502 break;
3504 return signal_levels;
3507 /* Gen6's DP voltage swing and pre-emphasis control */
3508 static uint32_t
3509 gen6_edp_signal_levels(uint8_t train_set)
3511 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3512 DP_TRAIN_PRE_EMPHASIS_MASK);
3513 switch (signal_levels) {
3514 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3515 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3516 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3517 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3518 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3519 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3520 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3521 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3522 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3523 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3524 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3525 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3526 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3527 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3528 default:
3529 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3530 "0x%x\n", signal_levels);
3531 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3535 /* Gen7's DP voltage swing and pre-emphasis control */
3536 static uint32_t
3537 gen7_edp_signal_levels(uint8_t train_set)
3539 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3540 DP_TRAIN_PRE_EMPHASIS_MASK);
3541 switch (signal_levels) {
3542 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3543 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3544 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3545 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3546 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3547 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3549 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3550 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3551 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3552 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3554 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3555 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3556 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3557 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3559 default:
3560 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3561 "0x%x\n", signal_levels);
3562 return EDP_LINK_TRAIN_500MV_0DB_IVB;
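/*
 * Pick the platform specific signal level encoding for the current
 * train_set and merge it into the port register under the matching
 * mask. On VLV/CHV the levels live in the sideband instead, so the
 * mask (and the returned value) stay zero.
 */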
3566 void
3567 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3569 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3570 enum port port = intel_dig_port->port;
3571 struct drm_device *dev = intel_dig_port->base.base.dev;
3572 struct drm_i915_private *dev_priv = to_i915(dev);
3573 uint32_t signal_levels, mask = 0;
3574 uint8_t train_set = intel_dp->train_set[0];
3576 if (HAS_DDI(dev)) {
3577 signal_levels = ddi_signal_levels(intel_dp);
3579 if (IS_BROXTON(dev))
3580 signal_levels = 0;
3581 else
3582 mask = DDI_BUF_EMP_MASK;
3583 } else if (IS_CHERRYVIEW(dev)) {
3584 signal_levels = chv_signal_levels(intel_dp);
3585 } else if (IS_VALLEYVIEW(dev)) {
3586 signal_levels = vlv_signal_levels(intel_dp);
3587 } else if (IS_GEN7(dev) && port == PORT_A) {
3588 signal_levels = gen7_edp_signal_levels(train_set);
3589 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3590 } else if (IS_GEN6(dev) && port == PORT_A) {
3591 signal_levels = gen6_edp_signal_levels(train_set);
3592 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3593 } else {
3594 signal_levels = gen4_signal_levels(train_set);
3595 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3598 if (mask)
3599 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3601 DRM_DEBUG_KMS("Using vswing level %d\n",
3602 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3603 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3604 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3605 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3607 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3609 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3610 POSTING_READ(intel_dp->output_reg);
3613 void
3614 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3615 uint8_t dp_train_pat)
3617 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3618 struct drm_i915_private *dev_priv =
3619 to_i915(intel_dig_port->base.base.dev);
3621 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3623 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3624 POSTING_READ(intel_dp->output_reg);
3627 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3629 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3630 struct drm_device *dev = intel_dig_port->base.base.dev;
3631 struct drm_i915_private *dev_priv = dev->dev_private;
3632 enum port port = intel_dig_port->port;
3633 uint32_t val;
3635 if (!HAS_DDI(dev))
3636 return;
3638 val = I915_READ(DP_TP_CTL(port));
3639 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3640 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3641 I915_WRITE(DP_TP_CTL(port), val);
3644 * On PORT_A we can have only eDP in SST mode. There the only reason
3645 * we need to set idle transmission mode is to work around a HW issue
3646 * where we enable the pipe while not in idle link-training mode.
3647 * In this case there is a requirement to wait for a minimum number of
3648 * idle patterns to be sent.
3650 if (port == PORT_A)
3651 return;
3653 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3655 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3658 static void
3659 intel_dp_link_down(struct intel_dp *intel_dp)
3661 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3662 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3663 enum port port = intel_dig_port->port;
3664 struct drm_device *dev = intel_dig_port->base.base.dev;
3665 struct drm_i915_private *dev_priv = dev->dev_private;
3666 uint32_t DP = intel_dp->DP;
3668 if (WARN_ON(HAS_DDI(dev)))
3669 return;
3671 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3672 return;
3674 DRM_DEBUG_KMS("\n");
3676 if ((IS_GEN7(dev) && port == PORT_A) ||
3677 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3678 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3679 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3680 } else {
3681 if (IS_CHERRYVIEW(dev))
3682 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3683 else
3684 DP &= ~DP_LINK_TRAIN_MASK;
3685 DP |= DP_LINK_TRAIN_PAT_IDLE;
3687 I915_WRITE(intel_dp->output_reg, DP);
3688 POSTING_READ(intel_dp->output_reg);
3690 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3691 I915_WRITE(intel_dp->output_reg, DP);
3692 POSTING_READ(intel_dp->output_reg);
3695 * HW workaround for IBX, we need to move the port
3696 * to transcoder A after disabling it to allow the
3697 * matching HDMI port to be enabled on transcoder A.
3699 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3701 * We get CPU/PCH FIFO underruns on the other pipe when
3702 * doing the workaround. Sweep them under the rug.
3704 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3705 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3707 /* always enable with pattern 1 (as per spec) */
3708 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3709 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3710 I915_WRITE(intel_dp->output_reg, DP);
3711 POSTING_READ(intel_dp->output_reg);
3713 DP &= ~DP_PORT_EN;
3714 I915_WRITE(intel_dp->output_reg, DP);
3715 POSTING_READ(intel_dp->output_reg);
3717 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3718 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3719 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3722 msleep(intel_dp->panel_power_down_delay);
3724 intel_dp->DP = DP;
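/*
 * Read and cache the sink's capabilities: the base DPCD receiver caps,
 * the sink count, PSR/PSR2 support on eDP, the eDP 1.4 intermediate
 * link rate table, and the downstream port info where present.
 */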
3727 static bool
3728 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3730 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3731 struct drm_device *dev = dig_port->base.base.dev;
3732 struct drm_i915_private *dev_priv = dev->dev_private;
3733 uint8_t rev;
3735 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3736 sizeof(intel_dp->dpcd)) < 0)
3737 return false; /* aux transfer failed */
3739 #ifdef __DragonFly__
3740 char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
3741 DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
3742 dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
3743 #else
3744 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3745 #endif
3747 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3748 return false; /* DPCD not present */
3750 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
3751 &intel_dp->sink_count, 1) < 0)
3752 return false;
3755 * The sink count can change between short pulse HPD interrupts,
3756 * hence a member variable in intel_dp tracks any changes
3757 * between them.
3759 intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
3762 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
3763 * a dongle is present but no display. Unless we need to know
3764 * if a dongle is present or not, we don't need to update
3765 * downstream port information. So, an early return here saves
3766 * time from performing other operations which are not required.
3768 if (!is_edp(intel_dp) && !intel_dp->sink_count)
3769 return false;
3771 /* Check if the panel supports PSR */
3772 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3773 if (is_edp(intel_dp)) {
3774 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
3775 intel_dp->psr_dpcd,
3776 sizeof(intel_dp->psr_dpcd));
3777 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3778 dev_priv->psr.sink_support = true;
3779 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3782 if (INTEL_INFO(dev)->gen >= 9 &&
3783 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3784 uint8_t frame_sync_cap;
3786 dev_priv->psr.sink_support = true;
3787 drm_dp_dpcd_read(&intel_dp->aux,
3788 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3789 &frame_sync_cap, 1);
3790 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3791 /* PSR2 needs frame sync as well */
3792 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3793 DRM_DEBUG_KMS("PSR2 %s on sink",
3794 dev_priv->psr.psr2_support ? "supported" : "not supported");
3798 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3799 yesno(intel_dp_source_supports_hbr2(intel_dp)),
3800 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3802 /* Intermediate frequency support */
3803 if (is_edp(intel_dp) &&
3804 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3805 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3806 (rev >= 0x03)) { /* eDP v1.4 or higher */
3807 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3808 int i;
3810 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
3811 sink_rates, sizeof(sink_rates));
3813 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3814 int val = le16_to_cpu(sink_rates[i]);
3816 if (val == 0)
3817 break;
3819 /* Value read is in units of 200 kHz, while the drm clock is saved in deca-kHz */
3820 intel_dp->sink_rates[i] = (val * 200) / 10;
3822 intel_dp->num_sink_rates = i;
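/*
 * Worked example of the conversion above: the eDP 1.4 rate table stores
 * entries in units of 200 kHz, so a 1.62 GHz link rate arrives as
 * le16 0x1FA4 (8100), and (8100 * 200) / 10 = 162000 matches the
 * deca-kHz values used in the source rate tables.
 */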
3825 intel_dp_print_rates(intel_dp);
3827 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3828 DP_DWN_STRM_PORT_PRESENT))
3829 return true; /* native DP sink */
3831 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3832 return true; /* no per-port downstream info */
3834 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3835 intel_dp->downstream_ports,
3836 DP_MAX_DOWNSTREAM_PORTS) < 0)
3837 return false; /* downstream port status fetch failed */
3839 return true;
3842 static void
3843 intel_dp_probe_oui(struct intel_dp *intel_dp)
3845 u8 buf[3];
3847 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3848 return;
3850 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3851 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3852 buf[0], buf[1], buf[2]);
3854 if (drm_dp_dpcd_read(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3855 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3856 buf[0], buf[1], buf[2]);
3859 static bool
3860 intel_dp_probe_mst(struct intel_dp *intel_dp)
3862 u8 buf[1];
3864 if (!i915.enable_dp_mst)
3865 return false;
3867 if (!intel_dp->can_mst)
3868 return false;
3870 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3871 return false;
3873 if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) == 1) { /* exactly one byte; AUX errors return < 0 */
3874 if (buf[0] & DP_MST_CAP) {
3875 DRM_DEBUG_KMS("Sink is MST capable\n");
3876 intel_dp->is_mst = true;
3877 } else {
3878 DRM_DEBUG_KMS("Sink is not MST capable\n");
3879 intel_dp->is_mst = false;
3883 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3884 return intel_dp->is_mst;
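/*
 * drm_dp_mst_topology_mgr_set_mst() is what actually switches the sink
 * into (or out of) MST mode, writing DP_MSTM_CTRL and setting up or
 * tearing down the topology manager state.
 */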
3887 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3889 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3890 struct drm_device *dev = dig_port->base.base.dev;
3891 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3892 u8 buf;
3893 int ret = 0;
3894 int count = 0;
3895 int attempts = 10;
3897 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3898 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3899 ret = -EIO;
3900 goto out;
3903 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3904 buf & ~DP_TEST_SINK_START) < 0) {
3905 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3906 ret = -EIO;
3907 goto out;
3910 do {
3911 intel_wait_for_vblank(dev, intel_crtc->pipe);
3913 if (drm_dp_dpcd_readb(&intel_dp->aux,
3914 DP_TEST_SINK_MISC, &buf) < 0) {
3915 ret = -EIO;
3916 goto out;
3918 count = buf & DP_TEST_COUNT_MASK;
3919 } while (--attempts && count);
3921 if (attempts == 0) {
3922 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
3923 ret = -ETIMEDOUT;
3926 out:
3927 hsw_enable_ips(intel_crtc);
3928 return ret;
3931 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3933 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3934 struct drm_device *dev = dig_port->base.base.dev;
3935 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3936 u8 buf;
3937 int ret;
3939 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3940 return -EIO;
3942 if (!(buf & DP_TEST_CRC_SUPPORTED))
3943 return -ENOTTY;
3945 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3946 return -EIO;
3948 if (buf & DP_TEST_SINK_START) {
3949 ret = intel_dp_sink_crc_stop(intel_dp);
3950 if (ret)
3951 return ret;
3954 hsw_disable_ips(intel_crtc);
3956 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3957 buf | DP_TEST_SINK_START) < 0) {
3958 hsw_enable_ips(intel_crtc);
3959 return -EIO;
3962 intel_wait_for_vblank(dev, intel_crtc->pipe);
3963 return 0;
3966 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3968 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3969 struct drm_device *dev = dig_port->base.base.dev;
3970 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3971 u8 buf;
3972 int count, ret;
3973 int attempts = 6;
3975 ret = intel_dp_sink_crc_start(intel_dp);
3976 if (ret)
3977 return ret;
3979 do {
3980 intel_wait_for_vblank(dev, intel_crtc->pipe);
3982 if (drm_dp_dpcd_readb(&intel_dp->aux,
3983 DP_TEST_SINK_MISC, &buf) < 0) {
3984 ret = -EIO;
3985 goto stop;
3987 count = buf & DP_TEST_COUNT_MASK;
3989 } while (--attempts && count == 0);
3991 if (attempts == 0) {
3992 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3993 ret = -ETIMEDOUT;
3994 goto stop;
3997 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3998 ret = -EIO;
3999 goto stop;
4002 stop:
4003 intel_dp_sink_crc_stop(intel_dp);
4004 return ret;
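/*
 * Illustrative only: a debugfs-style consumer of intel_dp_sink_crc()
 * (hypothetical caller, not part of this file) might look like:
 *
 *	u8 crc[6];
 *
 *	if (intel_dp_sink_crc(intel_dp, crc) == 0)
 *		seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
 *			   crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
 */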
4007 static bool
4008 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4010 return drm_dp_dpcd_read(&intel_dp->aux,
4011 DP_DEVICE_SERVICE_IRQ_VECTOR,
4012 sink_irq_vector, 1) == 1;
4015 static bool
4016 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4018 int ret;
4020 ret = drm_dp_dpcd_read(&intel_dp->aux,
4021 DP_SINK_COUNT_ESI,
4022 sink_irq_vector, 14);
4023 if (ret != 14)
4024 return false;
4026 return true;
4029 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4031 uint8_t test_result = DP_TEST_ACK;
4032 return test_result;
4035 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4037 uint8_t test_result = DP_TEST_NAK;
4038 return test_result;
4041 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4043 uint8_t test_result = DP_TEST_NAK;
4044 struct intel_connector *intel_connector = intel_dp->attached_connector;
4045 struct drm_connector *connector = &intel_connector->base;
4047 if (intel_connector->detect_edid == NULL ||
4048 connector->edid_corrupt ||
4049 intel_dp->aux.i2c_defer_count > 6) {
4050 /* Check EDID read for NACKs, DEFERs and corruption
4051 * (DP CTS 1.2 Core r1.1)
4052 * 4.2.2.4 : Failed EDID read, I2C_NAK
4053 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4054 * 4.2.2.6 : EDID corruption detected
4055 * Use failsafe mode for all cases
4057 if (intel_dp->aux.i2c_nack_count > 0 ||
4058 intel_dp->aux.i2c_defer_count > 0)
4059 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4060 intel_dp->aux.i2c_nack_count,
4061 intel_dp->aux.i2c_defer_count);
4062 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4063 } else {
4064 struct edid *block = intel_connector->detect_edid;
4066 /* We have to write the checksum
4067 * of the last block read
4069 block += intel_connector->detect_edid->extensions;
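/*
 * This is pointer arithmetic on struct edid (128 bytes, one EDID
 * block), so "+ extensions" lands on the last block read; its final
 * byte is that block's checksum.
 */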
4071 if (!drm_dp_dpcd_write(&intel_dp->aux,
4072 DP_TEST_EDID_CHECKSUM,
4073 &block->checksum,
4074 1))
4075 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4077 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4078 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4081 /* Set test active flag here so userspace doesn't interrupt things */
4082 intel_dp->compliance_test_active = 1;
4084 return test_result;
4087 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4089 uint8_t test_result = DP_TEST_NAK;
4090 return test_result;
4093 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4095 uint8_t response = DP_TEST_NAK;
4096 uint8_t rxdata = 0;
4097 int status = 0;
4099 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4100 if (status <= 0) {
4101 DRM_DEBUG_KMS("Could not read test request from sink\n");
4102 goto update_status;
4105 switch (rxdata) {
4106 case DP_TEST_LINK_TRAINING:
4107 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4108 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4109 response = intel_dp_autotest_link_training(intel_dp);
4110 break;
4111 case DP_TEST_LINK_VIDEO_PATTERN:
4112 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4113 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4114 response = intel_dp_autotest_video_pattern(intel_dp);
4115 break;
4116 case DP_TEST_LINK_EDID_READ:
4117 DRM_DEBUG_KMS("EDID test requested\n");
4118 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4119 response = intel_dp_autotest_edid(intel_dp);
4120 break;
4121 case DP_TEST_LINK_PHY_TEST_PATTERN:
4122 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4123 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4124 response = intel_dp_autotest_phy_pattern(intel_dp);
4125 break;
4126 default:
4127 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4128 break;
4131 update_status:
4132 status = drm_dp_dpcd_write(&intel_dp->aux,
4133 DP_TEST_RESPONSE,
4134 &response, 1);
4135 if (status <= 0)
4136 DRM_DEBUG_KMS("Could not write test response to sink\n");
4139 static int
4140 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4142 bool bret;
4144 if (intel_dp->is_mst) {
4145 u8 esi[16] = { 0 };
4146 int ret = 0;
4147 int retry;
4148 bool handled;
4149 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
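/*
 * The 14-byte ESI read starts at DP_SINK_COUNT_ESI (DPCD 0x2002),
 * which is why the handled case below acks by writing esi[1..3] back
 * to DP_SINK_COUNT_ESI + 1, the event status indicator vectors.
 */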
4150 go_again:
4151 if (bret == true) {
4153 /* check link status - esi[10] corresponds to DPCD 0x200c (link status ESI) */
4154 if (intel_dp->active_mst_links &&
4155 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4156 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4157 intel_dp_start_link_train(intel_dp);
4158 intel_dp_stop_link_train(intel_dp);
4161 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4162 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4164 if (handled) {
4165 for (retry = 0; retry < 3; retry++) {
4166 int wret;
4167 wret = drm_dp_dpcd_write(&intel_dp->aux,
4168 DP_SINK_COUNT_ESI+1,
4169 &esi[1], 3);
4170 if (wret == 3) {
4171 break;
4175 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4176 if (bret == true) {
4177 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4178 goto go_again;
4180 } else
4181 ret = 0;
4183 return ret;
4184 } else {
4185 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4186 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4187 intel_dp->is_mst = false;
4188 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4189 /* send a hotplug event */
4190 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4193 return -EINVAL;
4196 static void
4197 intel_dp_check_link_status(struct intel_dp *intel_dp)
4199 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4200 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4201 u8 link_status[DP_LINK_STATUS_SIZE];
4203 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4205 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4206 DRM_ERROR("Failed to get link status\n");
4207 return;
4210 if (!intel_encoder->base.crtc)
4211 return;
4213 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4214 return;
4216 /* if link training is requested we should perform it always */
4217 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4218 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4219 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4220 intel_encoder->base.name);
4221 intel_dp_start_link_train(intel_dp);
4222 intel_dp_stop_link_train(intel_dp);
4227 * According to DP spec
4228 * 5.1.2:
4229 * 1. Read DPCD
4230 * 2. Configure link according to Receiver Capabilities
4231 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4232 * 4. Check link status on receipt of hot-plug interrupt
4234 * intel_dp_short_pulse - handles short pulse interrupts
4235 * when full detection is not required.
4236  * Returns %true if the short pulse was handled and full detection
4237  * is not required, %false otherwise.
4239 static bool
4240 intel_dp_short_pulse(struct intel_dp *intel_dp)
4242 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4243 u8 sink_irq_vector;
4244 u8 old_sink_count = intel_dp->sink_count;
4245 bool ret;
4248 * Clearing compliance test variables to allow capturing
4249 * of values for next automated test request.
4251 intel_dp->compliance_test_active = 0;
4252 intel_dp->compliance_test_type = 0;
4253 intel_dp->compliance_test_data = 0;
4256  * Now read the DPCD to see if it's actually running.
4257  * If the current sink count doesn't match the value that was
4258  * stored earlier, or the DPCD read failed, we need to do
4259  * full detection.
4261 ret = intel_dp_get_dpcd(intel_dp);
4263 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4264 /* No need to proceed if we are going to do full detect */
4265 return false;
4268 /* Try to read the source of the interrupt */
4269 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4270 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4271 /* Clear interrupt source */
4272 drm_dp_dpcd_writeb(&intel_dp->aux,
4273 DP_DEVICE_SERVICE_IRQ_VECTOR,
4274 sink_irq_vector);
4276 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4277 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4278 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4279 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4282 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4283 intel_dp_check_link_status(intel_dp);
4284 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4286 return true;
4289 /* XXX this is probably wrong for multiple downstream ports */
4290 static enum drm_connector_status
4291 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4293 uint8_t *dpcd = intel_dp->dpcd;
4294 uint8_t type;
4296 if (!intel_dp_get_dpcd(intel_dp))
4297 return connector_status_disconnected;
4299 if (is_edp(intel_dp))
4300 return connector_status_connected;
4302 /* if there's no downstream port, we're done */
4303 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4304 return connector_status_connected;
4306 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4307 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4308 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4310 return intel_dp->sink_count ?
4311 connector_status_connected : connector_status_disconnected;
4314 /* If no HPD, poke DDC gently */
4315 if (drm_probe_ddc(&intel_dp->aux.ddc))
4316 return connector_status_connected;
4318 /* Well we tried, say unknown for unreliable port types */
4319 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4320 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4321 if (type == DP_DS_PORT_TYPE_VGA ||
4322 type == DP_DS_PORT_TYPE_NON_EDID)
4323 return connector_status_unknown;
4324 } else {
4325 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4326 DP_DWN_STRM_PORT_TYPE_MASK;
4327 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4328 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4329 return connector_status_unknown;
4332 /* Anything else is out of spec, warn and ignore */
4333 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4334 return connector_status_disconnected;
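/*
 * Summary of the ladder above: eDP and sinks without a downstream port
 * count as connected once the DPCD reads back; branch devices are
 * judged by SINK_COUNT when they support HPD, by a DDC probe otherwise,
 * and only the unreliable port types (VGA, non-EDID) report unknown.
 */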
4337 static enum drm_connector_status
4338 edp_detect(struct intel_dp *intel_dp)
4340 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4341 enum drm_connector_status status;
4343 status = intel_panel_detect(dev);
4344 if (status == connector_status_unknown)
4345 status = connector_status_connected;
4347 return status;
4350 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4351 struct intel_digital_port *port)
4353 u32 bit;
4355 switch (port->port) {
4356 case PORT_A:
4357 return true;
4358 case PORT_B:
4359 bit = SDE_PORTB_HOTPLUG;
4360 break;
4361 case PORT_C:
4362 bit = SDE_PORTC_HOTPLUG;
4363 break;
4364 case PORT_D:
4365 bit = SDE_PORTD_HOTPLUG;
4366 break;
4367 default:
4368 MISSING_CASE(port->port);
4369 return false;
4372 return I915_READ(SDEISR) & bit;
4375 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4376 struct intel_digital_port *port)
4378 u32 bit;
4380 switch (port->port) {
4381 case PORT_A:
4382 return true;
4383 case PORT_B:
4384 bit = SDE_PORTB_HOTPLUG_CPT;
4385 break;
4386 case PORT_C:
4387 bit = SDE_PORTC_HOTPLUG_CPT;
4388 break;
4389 case PORT_D:
4390 bit = SDE_PORTD_HOTPLUG_CPT;
4391 break;
4392 case PORT_E:
4393 bit = SDE_PORTE_HOTPLUG_SPT;
4394 break;
4395 default:
4396 MISSING_CASE(port->port);
4397 return false;
4400 return I915_READ(SDEISR) & bit;
4403 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4404 struct intel_digital_port *port)
4406 u32 bit;
4408 switch (port->port) {
4409 case PORT_B:
4410 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4411 break;
4412 case PORT_C:
4413 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4414 break;
4415 case PORT_D:
4416 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4417 break;
4418 default:
4419 MISSING_CASE(port->port);
4420 return false;
4423 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4426 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4427 struct intel_digital_port *port)
4429 u32 bit;
4431 switch (port->port) {
4432 case PORT_B:
4433 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4434 break;
4435 case PORT_C:
4436 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4437 break;
4438 case PORT_D:
4439 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4440 break;
4441 default:
4442 MISSING_CASE(port->port);
4443 return false;
4446 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4449 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4450 struct intel_digital_port *intel_dig_port)
4452 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4453 enum port port;
4454 u32 bit;
4456 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4457 switch (port) {
4458 case PORT_A:
4459 bit = BXT_DE_PORT_HP_DDIA;
4460 break;
4461 case PORT_B:
4462 bit = BXT_DE_PORT_HP_DDIB;
4463 break;
4464 case PORT_C:
4465 bit = BXT_DE_PORT_HP_DDIC;
4466 break;
4467 default:
4468 MISSING_CASE(port);
4469 return false;
4472 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4476 * intel_digital_port_connected - is the specified port connected?
4477 * @dev_priv: i915 private structure
4478 * @port: the port to test
4480 * Return %true if @port is connected, %false otherwise.
4482 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4483 struct intel_digital_port *port)
4485 if (HAS_PCH_IBX(dev_priv))
4486 return ibx_digital_port_connected(dev_priv, port);
4487 else if (HAS_PCH_SPLIT(dev_priv))
4488 return cpt_digital_port_connected(dev_priv, port);
4489 else if (IS_BROXTON(dev_priv))
4490 return bxt_digital_port_connected(dev_priv, port);
4491 else if (IS_GM45(dev_priv))
4492 return gm45_digital_port_connected(dev_priv, port);
4493 else
4494 return g4x_digital_port_connected(dev_priv, port);
4497 static struct edid *
4498 intel_dp_get_edid(struct intel_dp *intel_dp)
4500 struct intel_connector *intel_connector = intel_dp->attached_connector;
4502 /* use cached edid if we have one */
4503 if (intel_connector->edid) {
4504 /* invalid edid */
4505 if (IS_ERR(intel_connector->edid))
4506 return NULL;
4508 return drm_edid_duplicate(intel_connector->edid);
4509 } else
4510 return drm_get_edid(&intel_connector->base,
4511 &intel_dp->aux.ddc);
4514 static void
4515 intel_dp_set_edid(struct intel_dp *intel_dp)
4517 struct intel_connector *intel_connector = intel_dp->attached_connector;
4518 struct edid *edid;
4520 intel_dp_unset_edid(intel_dp);
4521 edid = intel_dp_get_edid(intel_dp);
4522 intel_connector->detect_edid = edid;
4524 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4525 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4526 else
4527 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4530 static void
4531 intel_dp_unset_edid(struct intel_dp *intel_dp)
4533 struct intel_connector *intel_connector = intel_dp->attached_connector;
4535 kfree(intel_connector->detect_edid);
4536 intel_connector->detect_edid = NULL;
4538 intel_dp->has_audio = false;
4541 static void
4542 intel_dp_long_pulse(struct intel_connector *intel_connector)
4544 struct drm_connector *connector = &intel_connector->base;
4545 struct intel_dp *intel_dp = intel_attached_dp(connector);
4546 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4547 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4548 struct drm_device *dev = connector->dev;
4549 enum drm_connector_status status;
4550 enum intel_display_power_domain power_domain;
4551 bool ret;
4552 u8 sink_irq_vector;
4554 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4555 intel_display_power_get(to_i915(dev), power_domain);
4557 /* Can't disconnect eDP, but you can close the lid... */
4558 if (is_edp(intel_dp))
4559 status = edp_detect(intel_dp);
4560 else if (intel_digital_port_connected(to_i915(dev),
4561 dp_to_dig_port(intel_dp)))
4562 status = intel_dp_detect_dpcd(intel_dp);
4563 else
4564 status = connector_status_disconnected;
4566 if (status != connector_status_connected) {
4567 intel_dp->compliance_test_active = 0;
4568 intel_dp->compliance_test_type = 0;
4569 intel_dp->compliance_test_data = 0;
4571 if (intel_dp->is_mst) {
4572 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
4573 intel_dp->is_mst,
4574 intel_dp->mst_mgr.mst_state);
4575 intel_dp->is_mst = false;
4576 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4577 intel_dp->is_mst);
4580 goto out;
4583 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4584 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4586 intel_dp_probe_oui(intel_dp);
4588 ret = intel_dp_probe_mst(intel_dp);
4589 if (ret) {
4591 * If we are in MST mode then this connector
4592 * won't appear connected or have anything
4593 * with EDID on it
4595 status = connector_status_disconnected;
4596 goto out;
4597 } else if (connector->status == connector_status_connected) {
4599  * If the display was connected already and is still connected,
4600  * check the link status; there have been known issues of
4601  * link loss triggering a long pulse.
4603 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4604 intel_dp_check_link_status(intel_dp);
4605 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4606 goto out;
4610 * Clearing NACK and defer counts to get their exact values
4611 * while reading EDID which are required by Compliance tests
4612 * 4.2.2.4 and 4.2.2.5
4614 intel_dp->aux.i2c_nack_count = 0;
4615 intel_dp->aux.i2c_defer_count = 0;
4617 intel_dp_set_edid(intel_dp);
4619 status = connector_status_connected;
4620 intel_dp->detect_done = true;
4622 /* Try to read the source of the interrupt */
4623 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4624 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4625 /* Clear interrupt source */
4626 drm_dp_dpcd_writeb(&intel_dp->aux,
4627 DP_DEVICE_SERVICE_IRQ_VECTOR,
4628 sink_irq_vector);
4630 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4631 intel_dp_handle_test_request(intel_dp);
4632 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4633 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4636 out:
4637 if ((status != connector_status_connected) &&
4638 (intel_dp->is_mst == false))
4639 intel_dp_unset_edid(intel_dp);
4641 intel_display_power_put(to_i915(dev), power_domain);
4642 return;
4645 static enum drm_connector_status
4646 intel_dp_detect(struct drm_connector *connector, bool force)
4648 struct intel_dp *intel_dp = intel_attached_dp(connector);
4649 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4650 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4651 struct intel_connector *intel_connector = to_intel_connector(connector);
4653 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4654 connector->base.id, connector->name);
4656 if (intel_dp->is_mst) {
4657 /* MST devices are disconnected from a monitor POV */
4658 intel_dp_unset_edid(intel_dp);
4659 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4660 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4661 return connector_status_disconnected;
4664 /* If full detect is not performed yet, do a full detect */
4665 if (!intel_dp->detect_done)
4666 intel_dp_long_pulse(intel_dp->attached_connector);
4668 intel_dp->detect_done = false;
4670 if (is_edp(intel_dp) || intel_connector->detect_edid)
4671 return connector_status_connected;
4672 else
4673 return connector_status_disconnected;
4676 static void
4677 intel_dp_force(struct drm_connector *connector)
4679 struct intel_dp *intel_dp = intel_attached_dp(connector);
4680 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4681 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4682 enum intel_display_power_domain power_domain;
4684 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4685 connector->base.id, connector->name);
4686 intel_dp_unset_edid(intel_dp);
4688 if (connector->status != connector_status_connected)
4689 return;
4691 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4692 intel_display_power_get(dev_priv, power_domain);
4694 intel_dp_set_edid(intel_dp);
4696 intel_display_power_put(dev_priv, power_domain);
4698 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4699 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4702 static int intel_dp_get_modes(struct drm_connector *connector)
4704 struct intel_connector *intel_connector = to_intel_connector(connector);
4705 struct edid *edid;
4707 edid = intel_connector->detect_edid;
4708 if (edid) {
4709 int ret = intel_connector_update_modes(connector, edid);
4710 if (ret)
4711 return ret;
4714 /* if eDP has no EDID, fall back to fixed mode */
4715 if (is_edp(intel_attached_dp(connector)) &&
4716 intel_connector->panel.fixed_mode) {
4717 struct drm_display_mode *mode;
4719 mode = drm_mode_duplicate(connector->dev,
4720 intel_connector->panel.fixed_mode);
4721 if (mode) {
4722 drm_mode_probed_add(connector, mode);
4723 return 1;
4727 return 0;
4730 static bool
4731 intel_dp_detect_audio(struct drm_connector *connector)
4733 bool has_audio = false;
4734 struct edid *edid;
4736 edid = to_intel_connector(connector)->detect_edid;
4737 if (edid)
4738 has_audio = drm_detect_monitor_audio(edid);
4740 return has_audio;
4743 static int
4744 intel_dp_set_property(struct drm_connector *connector,
4745 struct drm_property *property,
4746 uint64_t val)
4748 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4749 struct intel_connector *intel_connector = to_intel_connector(connector);
4750 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4751 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4752 int ret;
4754 ret = drm_object_property_set_value(&connector->base, property, val);
4755 if (ret)
4756 return ret;
4758 if (property == dev_priv->force_audio_property) {
4759 int i = val;
4760 bool has_audio;
4762 if (i == intel_dp->force_audio)
4763 return 0;
4765 intel_dp->force_audio = i;
4767 if (i == HDMI_AUDIO_AUTO)
4768 has_audio = intel_dp_detect_audio(connector);
4769 else
4770 has_audio = (i == HDMI_AUDIO_ON);
4772 if (has_audio == intel_dp->has_audio)
4773 return 0;
4775 intel_dp->has_audio = has_audio;
4776 goto done;
4779 if (property == dev_priv->broadcast_rgb_property) {
4780 bool old_auto = intel_dp->color_range_auto;
4781 bool old_range = intel_dp->limited_color_range;
4783 switch (val) {
4784 case INTEL_BROADCAST_RGB_AUTO:
4785 intel_dp->color_range_auto = true;
4786 break;
4787 case INTEL_BROADCAST_RGB_FULL:
4788 intel_dp->color_range_auto = false;
4789 intel_dp->limited_color_range = false;
4790 break;
4791 case INTEL_BROADCAST_RGB_LIMITED:
4792 intel_dp->color_range_auto = false;
4793 intel_dp->limited_color_range = true;
4794 break;
4795 default:
4796 return -EINVAL;
4799 if (old_auto == intel_dp->color_range_auto &&
4800 old_range == intel_dp->limited_color_range)
4801 return 0;
4803 goto done;
4806 if (is_edp(intel_dp) &&
4807 property == connector->dev->mode_config.scaling_mode_property) {
4808 if (val == DRM_MODE_SCALE_NONE) {
4809 DRM_DEBUG_KMS("no scaling not supported\n");
4810 return -EINVAL;
4812 if (HAS_GMCH_DISPLAY(dev_priv) &&
4813 val == DRM_MODE_SCALE_CENTER) {
4814 DRM_DEBUG_KMS("centering not supported\n");
4815 return -EINVAL;
4818 if (intel_connector->panel.fitting_mode == val) {
4819 /* the eDP scaling property is not changed */
4820 return 0;
4822 intel_connector->panel.fitting_mode = val;
4824 goto done;
4827 return -EINVAL;
4829 done:
4830 if (intel_encoder->base.crtc)
4831 intel_crtc_restore_mode(intel_encoder->base.crtc);
4833 return 0;
4836 static void
4837 intel_dp_connector_destroy(struct drm_connector *connector)
4839 struct intel_connector *intel_connector = to_intel_connector(connector);
4841 kfree(intel_connector->detect_edid);
4843 if (!IS_ERR_OR_NULL(intel_connector->edid))
4844 kfree(intel_connector->edid);
4846 /* Can't call is_edp() since the encoder may have been destroyed
4847 * already. */
4848 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4849 intel_panel_fini(&intel_connector->panel);
4851 drm_connector_cleanup(connector);
4852 kfree(connector);
4855 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4857 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4858 struct intel_dp *intel_dp = &intel_dig_port->dp;
4860 intel_dp_mst_encoder_cleanup(intel_dig_port);
4861 if (is_edp(intel_dp)) {
4862 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4864  * vdd might still be enabled due to the delayed vdd off.
4865  * Make sure vdd is actually turned off here.
4867 pps_lock(intel_dp);
4868 edp_panel_vdd_off_sync(intel_dp);
4869 pps_unlock(intel_dp);
4871 #if 0
4872 if (intel_dp->edp_notifier.notifier_call) {
4873 unregister_reboot_notifier(&intel_dp->edp_notifier);
4874 intel_dp->edp_notifier.notifier_call = NULL;
4876 #endif
4878 drm_encoder_cleanup(encoder);
4879 kfree(intel_dig_port);
4882 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4884 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4886 if (!is_edp(intel_dp))
4887 return;
4890  * vdd might still be enabled due to the delayed vdd off.
4891  * Make sure vdd is actually turned off here.
4893 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4894 pps_lock(intel_dp);
4895 edp_panel_vdd_off_sync(intel_dp);
4896 pps_unlock(intel_dp);
4899 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4901 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4902 struct drm_device *dev = intel_dig_port->base.base.dev;
4903 struct drm_i915_private *dev_priv = dev->dev_private;
4904 enum intel_display_power_domain power_domain;
4906 lockdep_assert_held(&dev_priv->pps_mutex);
4908 if (!edp_have_panel_vdd(intel_dp))
4909 return;
4912 * The VDD bit needs a power domain reference, so if the bit is
4913 * already enabled when we boot or resume, grab this reference and
4914 * schedule a vdd off, so we don't hold on to the reference
4915 * indefinitely.
4917 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4918 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4919 intel_display_power_get(dev_priv, power_domain);
4921 edp_panel_vdd_schedule_off(intel_dp);
4924 void intel_dp_encoder_reset(struct drm_encoder *encoder)
4926 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
4927 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
4929 if (!HAS_DDI(dev_priv))
4930 intel_dp->DP = I915_READ(intel_dp->output_reg);
4932 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4933 return;
4935 pps_lock(intel_dp);
4938 * Read out the current power sequencer assignment,
4939 * in case the BIOS did something with it.
4941 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
4942 vlv_initial_power_sequencer_setup(intel_dp);
4944 intel_edp_panel_vdd_sanitize(intel_dp);
4946 pps_unlock(intel_dp);
4949 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4950 .dpms = drm_atomic_helper_connector_dpms,
4951 .detect = intel_dp_detect,
4952 .force = intel_dp_force,
4953 .fill_modes = drm_helper_probe_single_connector_modes,
4954 .set_property = intel_dp_set_property,
4955 .atomic_get_property = intel_connector_atomic_get_property,
4956 .destroy = intel_dp_connector_destroy,
4957 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4958 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4961 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4962 .get_modes = intel_dp_get_modes,
4963 .mode_valid = intel_dp_mode_valid,
4964 .best_encoder = intel_best_encoder,
4967 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4968 .reset = intel_dp_encoder_reset,
4969 .destroy = intel_dp_encoder_destroy,
4972 bool
4973 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4975 struct intel_dp *intel_dp = &intel_dig_port->dp;
4976 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4977 struct drm_device *dev = intel_dig_port->base.base.dev;
4978 struct drm_i915_private *dev_priv = dev->dev_private;
4979 enum intel_display_power_domain power_domain;
4980 bool ret = true;
4982 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
4983 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
4984 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4986 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4988 * vdd off can generate a long pulse on eDP which
4989 * would require vdd on to handle it, and thus we
4990 * would end up in an endless cycle of
4991 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4993 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4994 port_name(intel_dig_port->port));
4995 return false;
4998 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4999 port_name(intel_dig_port->port),
5000 long_hpd ? "long" : "short");
5002 power_domain = intel_display_port_aux_power_domain(intel_encoder);
5003 intel_display_power_get(dev_priv, power_domain);
5005 if (long_hpd) {
5006 intel_dp_long_pulse(intel_dp->attached_connector);
5007 if (intel_dp->is_mst)
5008 ret = false;
5009 goto put_power;
5011 } else {
5012 if (intel_dp->is_mst) {
5013 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
5015 * If we were in MST mode, and device is not
5016 * there, get out of MST mode
5018 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5019 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5020 intel_dp->is_mst = false;
5021 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5022 intel_dp->is_mst);
5023 goto put_power;
5027 if (!intel_dp->is_mst) {
5028 if (!intel_dp_short_pulse(intel_dp)) {
5029 intel_dp_long_pulse(intel_dp->attached_connector);
5030 goto put_power;
5035 ret = false;
5037 put_power:
5038 intel_display_power_put(dev_priv, power_domain);
5040 return ret;
5043 /* check the VBT to see whether the eDP is on another port */
5044 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5046 struct drm_i915_private *dev_priv = dev->dev_private;
5049  * eDP is not supported on g4x, so bail out early just
5050  * for a bit of extra safety in case the VBT is bonkers.
5052 if (INTEL_INFO(dev)->gen < 5)
5053 return false;
5055 if (port == PORT_A)
5056 return true;
5058 return intel_bios_is_port_edp(dev_priv, port);
5061 void
5062 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5064 struct intel_connector *intel_connector = to_intel_connector(connector);
5066 intel_attach_force_audio_property(connector);
5067 intel_attach_broadcast_rgb_property(connector);
5068 intel_dp->color_range_auto = true;
5070 if (is_edp(intel_dp)) {
5071 drm_mode_create_scaling_mode_property(connector->dev);
5072 drm_object_attach_property(
5073 &connector->base,
5074 connector->dev->mode_config.scaling_mode_property,
5075 DRM_MODE_SCALE_ASPECT);
5076 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5080 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5082 intel_dp->panel_power_off_time = ktime_get_boottime();
5083 intel_dp->last_power_on = jiffies;
5084 intel_dp->last_backlight_off = jiffies;
5087 static void
5088 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5089 struct intel_dp *intel_dp)
5091 struct drm_i915_private *dev_priv = dev->dev_private;
5092 struct edp_power_seq cur, vbt, spec,
5093 *final = &intel_dp->pps_delays;
5094 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5095 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5097 lockdep_assert_held(&dev_priv->pps_mutex);
5099 /* already initialized? */
5100 if (final->t11_t12 != 0)
5101 return;
5103 if (IS_BROXTON(dev)) {
5105 * TODO: BXT has 2 sets of PPS registers.
5106  * The correct register for Broxton needs to be identified
5107  * using the VBT; hardcoding for now.
5109 pp_ctrl_reg = BXT_PP_CONTROL(0);
5110 pp_on_reg = BXT_PP_ON_DELAYS(0);
5111 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5112 } else if (HAS_PCH_SPLIT(dev)) {
5113 pp_ctrl_reg = PCH_PP_CONTROL;
5114 pp_on_reg = PCH_PP_ON_DELAYS;
5115 pp_off_reg = PCH_PP_OFF_DELAYS;
5116 pp_div_reg = PCH_PP_DIVISOR;
5117 } else {
5118 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5120 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5121 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5122 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5123 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5126 /* Workaround: Need to write PP_CONTROL with the unlock key as
5127 * the very first thing. */
5128 pp_ctl = ironlake_get_pp_control(intel_dp);
5130 pp_on = I915_READ(pp_on_reg);
5131 pp_off = I915_READ(pp_off_reg);
5132 if (!IS_BROXTON(dev)) {
5133 I915_WRITE(pp_ctrl_reg, pp_ctl);
5134 pp_div = I915_READ(pp_div_reg);
5137 /* Pull timing values out of registers */
5138 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5139 PANEL_POWER_UP_DELAY_SHIFT;
5141 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5142 PANEL_LIGHT_ON_DELAY_SHIFT;
5144 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5145 PANEL_LIGHT_OFF_DELAY_SHIFT;
5147 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5148 PANEL_POWER_DOWN_DELAY_SHIFT;
5150 if (IS_BROXTON(dev)) {
5151 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5152 BXT_POWER_CYCLE_DELAY_SHIFT;
5153 if (tmp > 0)
5154 cur.t11_t12 = (tmp - 1) * 1000;
5155 else
5156 cur.t11_t12 = 0;
5157 } else {
5158 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5159 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
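/*
 * Both branches convert the hardware's power cycle field (units of
 * 100 ms) into the 100 us units used for the other delays: a BXT field
 * value of 7, for example, decodes to (7 - 1) * 1000 = 6000, i.e. 600 ms.
 */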
5162 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5163 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5165 vbt = dev_priv->vbt.edp.pps;
5167 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5168 * our hw here, which are all in 100usec. */
5169 spec.t1_t3 = 210 * 10;
5170 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5171 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5172 spec.t10 = 500 * 10;
5173 /* This one is special and actually in units of 100ms, but zero
5174 * based in the hw (so we need to add 100 ms). But the sw vbt
5175  * table multiplies it by 1000 to make it in units of 100usec,
5176 * too. */
5177 spec.t11_t12 = (510 + 100) * 10;
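/*
 * Worked out: (510 + 100) * 10 = 6100 units of 100 us, i.e. a 610 ms
 * upper limit for the power cycle delay.
 */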
5179 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5180 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5182 /* Use the max of the register settings and vbt. If both are
5183 * unset, fall back to the spec limits. */
5184 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5185 spec.field : \
5186 max(cur.field, vbt.field))
5187 assign_final(t1_t3);
5188 assign_final(t8);
5189 assign_final(t9);
5190 assign_final(t10);
5191 assign_final(t11_t12);
5192 #undef assign_final
5194 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5195 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5196 intel_dp->backlight_on_delay = get_delay(t8);
5197 intel_dp->backlight_off_delay = get_delay(t9);
5198 intel_dp->panel_power_down_delay = get_delay(t10);
5199 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5200 #undef get_delay
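/*
 * Example of the two macros combined: if neither the registers nor the
 * VBT provide t1_t3 (both read 0), assign_final() falls back to
 * spec.t1_t3 = 2100 (100 us units), and get_delay() rounds that up to
 * DIV_ROUND_UP(2100, 10) = 210 ms for panel_power_up_delay.
 */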
5202 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5203 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5204 intel_dp->panel_power_cycle_delay);
5206 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5207 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5210 static void
5211 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5212 struct intel_dp *intel_dp)
5214 struct drm_i915_private *dev_priv = dev->dev_private;
5215 u32 pp_on, pp_off, pp_div, port_sel = 0;
5216 int div = dev_priv->rawclk_freq / 1000;
5217 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5218 enum port port = dp_to_dig_port(intel_dp)->port;
5219 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5221 lockdep_assert_held(&dev_priv->pps_mutex);
5223 if (IS_BROXTON(dev)) {
5225 * TODO: BXT has 2 sets of PPS registers.
5226  * The correct register for Broxton needs to be identified
5227  * using the VBT; hardcoding for now.
5229 pp_ctrl_reg = BXT_PP_CONTROL(0);
5230 pp_on_reg = BXT_PP_ON_DELAYS(0);
5231 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5233 } else if (HAS_PCH_SPLIT(dev)) {
5234 pp_on_reg = PCH_PP_ON_DELAYS;
5235 pp_off_reg = PCH_PP_OFF_DELAYS;
5236 pp_div_reg = PCH_PP_DIVISOR;
5237 } else {
5238 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5240 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5241 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5242 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5246 * And finally store the new values in the power sequencer. The
5247 * backlight delays are set to 1 because we do manual waits on them. For
5248 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5249 * we'll end up waiting for the backlight off delay twice: once when we
5250 * do the manual sleep, and once when we disable the panel and wait for
5251 * the PP_STATUS bit to become zero.
5253 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5254 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5255 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5256 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5257 /* Compute the divisor for the pp clock, simply match the Bspec
5258 * formula. */
5259 if (IS_BROXTON(dev)) {
5260 pp_div = I915_READ(pp_ctrl_reg);
5261 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5262 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5263 << BXT_POWER_CYCLE_DELAY_SHIFT);
5264 } else {
5265 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5266 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5267 << PANEL_POWER_CYCLE_DELAY_SHIFT);
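/*
 * Example for the non-BXT branch: with a 24 MHz raw clock
 * (rawclk_freq == 24000), div = 24 and the reference divider field is
 * (100 * 24) / 2 - 1 = 1199; a 500 ms t11_t12 (5000 units of 100 us)
 * then programs DIV_ROUND_UP(5000, 1000) = 5 power cycle units.
 */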
5270 /* Haswell doesn't have any port selection bits for the panel
5271 * power sequencer any more. */
5272 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5273 port_sel = PANEL_PORT_SELECT_VLV(port);
5274 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5275 if (port == PORT_A)
5276 port_sel = PANEL_PORT_SELECT_DPA;
5277 else
5278 port_sel = PANEL_PORT_SELECT_DPD;
5281 pp_on |= port_sel;
5283 I915_WRITE(pp_on_reg, pp_on);
5284 I915_WRITE(pp_off_reg, pp_off);
5285 if (IS_BROXTON(dev))
5286 I915_WRITE(pp_ctrl_reg, pp_div);
5287 else
5288 I915_WRITE(pp_div_reg, pp_div);
5290 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5291 I915_READ(pp_on_reg),
5292 I915_READ(pp_off_reg),
5293 IS_BROXTON(dev) ?
5294 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5295 I915_READ(pp_div_reg));
5299 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5300 * @dev: DRM device
5301 * @refresh_rate: RR to be programmed
5303 * This function gets called when refresh rate (RR) has to be changed from
5304 * one frequency to another. Switches can be between high and low RR
5305 * supported by the panel or to any other RR based on media playback (in
5306 * this case, RR value needs to be passed from user space).
5308 * The caller of this function needs to take a lock on dev_priv->drrs.
5310 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5312 struct drm_i915_private *dev_priv = dev->dev_private;
5313 struct intel_encoder *encoder;
5314 struct intel_digital_port *dig_port = NULL;
5315 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5316 struct intel_crtc_state *config = NULL;
5317 struct intel_crtc *intel_crtc = NULL;
5318 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5320 if (refresh_rate <= 0) {
5321 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5322 return;
5325 if (intel_dp == NULL) {
5326 DRM_DEBUG_KMS("DRRS not supported.\n");
5327 return;
5331 * FIXME: This needs proper synchronization with psr state for some
5332 * platforms that cannot have PSR and DRRS enabled at the same time.
5335 dig_port = dp_to_dig_port(intel_dp);
5336 encoder = &dig_port->base;
5337 intel_crtc = to_intel_crtc(encoder->base.crtc);
5339 if (!intel_crtc) {
5340 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5341 return;
5344 config = intel_crtc->config;
5346 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5347 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5348 return;
5351 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5352 refresh_rate)
5353 index = DRRS_LOW_RR;
5355 if (index == dev_priv->drrs.refresh_rate_type) {
5356 DRM_DEBUG_KMS(
5357 "DRRS requested for previously set RR...ignoring\n");
5358 return;
5361 if (!intel_crtc->active) {
5362 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5363 return;
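/*
 * Two switching mechanisms follow: gen8+ (except CHV) reprograms the
 * link M/N values (M1_N1 vs M2_N2, both computed at modeset time),
 * while gen7 toggles a PIPECONF bit that selects between the two
 * pre-programmed refresh rates.
 */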
5366 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5367 switch (index) {
5368 case DRRS_HIGH_RR:
5369 intel_dp_set_m_n(intel_crtc, M1_N1);
5370 break;
5371 case DRRS_LOW_RR:
5372 intel_dp_set_m_n(intel_crtc, M2_N2);
5373 break;
5374 case DRRS_MAX_RR:
5375 default:
5376 DRM_ERROR("Unsupported refreshrate type\n");
5378 } else if (INTEL_INFO(dev)->gen > 6) {
5379 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5380 u32 val;
5382 val = I915_READ(reg);
5383 if (index > DRRS_HIGH_RR) {
5384 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5385 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5386 else
5387 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5388 } else {
5389 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5390 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5391 else
5392 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5394 I915_WRITE(reg, val);
5397 dev_priv->drrs.refresh_rate_type = index;
5399 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5403 * intel_edp_drrs_enable - init drrs struct if supported
5404 * @intel_dp: DP struct
5406 * Initializes frontbuffer_bits and drrs.dp
5408 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5410 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5411 struct drm_i915_private *dev_priv = dev->dev_private;
5412 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5413 struct drm_crtc *crtc = dig_port->base.base.crtc;
5414 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5416 if (!intel_crtc->config->has_drrs) {
5417 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5418 return;
5421 mutex_lock(&dev_priv->drrs.mutex);
5422 if (WARN_ON(dev_priv->drrs.dp)) {
5423 DRM_ERROR("DRRS already enabled\n");
5424 goto unlock;
5427 dev_priv->drrs.busy_frontbuffer_bits = 0;
5429 dev_priv->drrs.dp = intel_dp;
5431 unlock:
5432 mutex_unlock(&dev_priv->drrs.mutex);
5436 * intel_edp_drrs_disable - Disable DRRS
5437 * @intel_dp: DP struct
5440 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5442 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5443 struct drm_i915_private *dev_priv = dev->dev_private;
5444 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5445 struct drm_crtc *crtc = dig_port->base.base.crtc;
5446 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5448 if (!intel_crtc->config->has_drrs)
5449 return;
5451 mutex_lock(&dev_priv->drrs.mutex);
5452 if (!dev_priv->drrs.dp) {
5453 mutex_unlock(&dev_priv->drrs.mutex);
5454 return;
5457 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5458 intel_dp_set_drrs_state(dev_priv->dev,
5459 intel_dp->attached_connector->panel.
5460 fixed_mode->vrefresh);
5462 dev_priv->drrs.dp = NULL;
5463 mutex_unlock(&dev_priv->drrs.mutex);
5465 cancel_delayed_work_sync(&dev_priv->drrs.work);
5468 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5470 struct drm_i915_private *dev_priv =
5471 container_of(work, typeof(*dev_priv), drrs.work.work);
5472 struct intel_dp *intel_dp;
5474 mutex_lock(&dev_priv->drrs.mutex);
5476 intel_dp = dev_priv->drrs.dp;
5478 if (!intel_dp)
5479 goto unlock;
5482 * The delayed work can race with an invalidate hence we need to
5483 * recheck.
5486 if (dev_priv->drrs.busy_frontbuffer_bits)
5487 goto unlock;
5489 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5490 intel_dp_set_drrs_state(dev_priv->dev,
5491 intel_dp->attached_connector->panel.
5492 downclock_mode->vrefresh);
5494 unlock:
5495 mutex_unlock(&dev_priv->drrs.mutex);
5499 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5500 * @dev: DRM device
5501 * @frontbuffer_bits: frontbuffer plane tracking bits
5503  * This function gets called every time rendering on the given planes starts.
5504  * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
5506 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5508 void intel_edp_drrs_invalidate(struct drm_device *dev,
5509 unsigned frontbuffer_bits)
5511 struct drm_i915_private *dev_priv = dev->dev_private;
5512 struct drm_crtc *crtc;
5513 enum i915_pipe pipe;
5515 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5516 return;
5518 cancel_delayed_work(&dev_priv->drrs.work);
5520 mutex_lock(&dev_priv->drrs.mutex);
5521 if (!dev_priv->drrs.dp) {
5522 mutex_unlock(&dev_priv->drrs.mutex);
5523 return;
5526 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5527 pipe = to_intel_crtc(crtc)->pipe;
5529 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5530 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5532 /* invalidate means busy screen hence upclock */
5533 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5534 intel_dp_set_drrs_state(dev_priv->dev,
5535 dev_priv->drrs.dp->attached_connector->panel.
5536 fixed_mode->vrefresh);
5538 mutex_unlock(&dev_priv->drrs.mutex);
5542 * intel_edp_drrs_flush - Restart Idleness DRRS
5543 * @dev: DRM device
5544 * @frontbuffer_bits: frontbuffer plane tracking bits
5546 * This function gets called every time rendering on the given planes has
5547  * completed or a flip on a crtc has completed. So DRRS should be upclocked
5548  * (LOW_RR -> HIGH_RR). Also, idleness detection should be started again,
5549 * if no other planes are dirty.
5551 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5553 void intel_edp_drrs_flush(struct drm_device *dev,
5554 unsigned frontbuffer_bits)
5556 struct drm_i915_private *dev_priv = dev->dev_private;
5557 struct drm_crtc *crtc;
5558 enum i915_pipe pipe;
5560 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5561 return;
5563 cancel_delayed_work(&dev_priv->drrs.work);
5565 mutex_lock(&dev_priv->drrs.mutex);
5566 if (!dev_priv->drrs.dp) {
5567 mutex_unlock(&dev_priv->drrs.mutex);
5568 return;
5571 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5572 pipe = to_intel_crtc(crtc)->pipe;
5574 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5575 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5577 /* flush means busy screen hence upclock */
5578 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5579 intel_dp_set_drrs_state(dev_priv->dev,
5580 dev_priv->drrs.dp->attached_connector->panel.
5581 fixed_mode->vrefresh);
5584 * flush also means no more activity hence schedule downclock, if all
5585 * other fbs are quiescent too
5587 if (!dev_priv->drrs.busy_frontbuffer_bits)
5588 schedule_delayed_work(&dev_priv->drrs.work,
5589 msecs_to_jiffies(1000));
5590 mutex_unlock(&dev_priv->drrs.mutex);
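/*
 * The 1000 ms above is the idleness timeout described in the DRRS DOC
 * comment below: one second with no dirty frontbuffer bits lets the
 * delayed work downclock the panel.
 */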
5594 * DOC: Display Refresh Rate Switching (DRRS)
5596 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5597  * which enables switching between low and high refresh rates,
5598 * dynamically, based on the usage scenario. This feature is applicable
5599 * for internal panels.
5601 * Indication that the panel supports DRRS is given by the panel EDID, which
5602 * would list multiple refresh rates for one resolution.
5604 * DRRS is of 2 types - static and seamless.
5605 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5606  * (may appear as a blink on screen) and is used in dock/undock scenarios.
5607 * Seamless DRRS involves changing RR without any visual effect to the user
5608 * and can be used during normal system usage. This is done by programming
5609 * certain registers.
5611 * Support for static/seamless DRRS may be indicated in the VBT based on
5612 * inputs from the panel spec.
5614 * DRRS saves power by switching to low RR based on usage scenarios.
5616  * eDP DRRS:
5617 * The implementation is based on frontbuffer tracking implementation.
5618 * When there is a disturbance on the screen triggered by user activity or a
5619 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5620 * When there is no movement on screen, after a timeout of 1 second, a switch
5621 * to low RR is made.
5622 * For integration with frontbuffer tracking code,
5623 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5625 * DRRS can be further extended to support other internal panels and also
5626 * the scenario of video playback wherein RR is set based on the rate
5627 * requested by userspace.
5631 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5632 * @intel_connector: eDP connector
5633 * @fixed_mode: preferred mode of panel
5635 * This function is called only once at driver load to initialize basic
5636 * DRRS stuff.
5638 * Returns:
5639  * Downclock mode if the panel supports it, else NULL.
5640 * DRRS support is determined by the presence of downclock mode (apart
5641 * from VBT setting).
5643 static struct drm_display_mode *
5644 intel_dp_drrs_init(struct intel_connector *intel_connector,
5645 struct drm_display_mode *fixed_mode)
5647 struct drm_connector *connector = &intel_connector->base;
5648 struct drm_device *dev = connector->dev;
5649 struct drm_i915_private *dev_priv = dev->dev_private;
5650 struct drm_display_mode *downclock_mode = NULL;
5652 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5653 lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
5655 if (INTEL_INFO(dev)->gen <= 6) {
5656 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5657 return NULL;
5660 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5661 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5662 return NULL;
5665 downclock_mode = intel_find_panel_downclock
5666 (dev, fixed_mode, connector);
5668 if (!downclock_mode) {
5669 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5670 return NULL;
5673 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5675 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5676 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5677 return downclock_mode;
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
                                     struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        struct drm_display_mode *downclock_mode = NULL;
        bool has_dpcd;
        struct drm_display_mode *scan;
        struct edid *edid;
        enum i915_pipe pipe = INVALID_PIPE;

        if (!is_edp(intel_dp))
                return true;

        pps_lock(intel_dp);
        intel_edp_panel_vdd_sanitize(intel_dp);
        pps_unlock(intel_dp);

        /* Cache DPCD and EDID for edp. */
        has_dpcd = intel_dp_get_dpcd(intel_dp);

        if (has_dpcd) {
                if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
                        dev_priv->no_aux_handshake =
                                intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
                                DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
        } else {
                /* if this fails, presume the device is a ghost */
                DRM_INFO("failed to retrieve link info, disabling eDP\n");
                return false;
        }

        /* We now know it's not a ghost, init power sequence regs. */
        pps_lock(intel_dp);
        intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
        pps_unlock(intel_dp);

        mutex_lock(&dev->mode_config.mutex);
        edid = drm_get_edid(connector, &intel_dp->aux.ddc);
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
                                                                edid);
                        drm_edid_to_eld(connector, edid);
                } else {
                        kfree(edid);
                        edid = ERR_PTR(-EINVAL);
                }
        } else {
                edid = ERR_PTR(-ENOENT);
        }
        intel_connector->edid = edid;

        /* prefer fixed mode from EDID if available */
        list_for_each_entry(scan, &connector->probed_modes, head) {
                if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
                        fixed_mode = drm_mode_duplicate(dev, scan);
                        downclock_mode = intel_dp_drrs_init(
                                                intel_connector, fixed_mode);
                        break;
                }
        }

        /* fallback to VBT if available for eDP */
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode) {
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
                        connector->display_info.width_mm = fixed_mode->width_mm;
                        connector->display_info.height_mm = fixed_mode->height_mm;
                }
        }
        mutex_unlock(&dev->mode_config.mutex);

        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                intel_dp->edp_notifier.notifier_call = edp_notify_handler;
#if 0
                register_reboot_notifier(&intel_dp->edp_notifier);
#endif

                /*
                 * Figure out the current pipe for the initial backlight setup.
                 * If the current pipe isn't valid, try the PPS pipe, and if
                 * that fails just assume pipe A.
                 */
                if (IS_CHERRYVIEW(dev))
                        pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
                else
                        pipe = PORT_TO_PIPE(intel_dp->DP);

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = intel_dp->pps_pipe;

                if (pipe != PIPE_A && pipe != PIPE_B)
                        pipe = PIPE_A;

                DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
                              pipe_name(pipe));
        }

        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_connector->panel.backlight.power = intel_edp_backlight_power;
        intel_panel_setup_backlight(connector, pipe);

        return true;
}
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                        struct intel_connector *intel_connector)
{
        struct drm_connector *connector = &intel_connector->base;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_encoder *intel_encoder = &intel_dig_port->base;
        struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
        int type, ret;

        if (WARN(intel_dig_port->max_lanes < 1,
                 "Not enough lanes (%d) for DP on port %c\n",
                 intel_dig_port->max_lanes, port_name(port)))
                return false;

        intel_dp->pps_pipe = INVALID_PIPE;

        /* intel_dp vfuncs */
        if (INTEL_INFO(dev)->gen >= 9)
                intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
        else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
                intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
        else if (HAS_PCH_SPLIT(dev))
                intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
        else
                intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

        if (INTEL_INFO(dev)->gen >= 9)
                intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
        else
                intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

        if (HAS_DDI(dev))
                intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

        /* Preserve the current hw state. */
        intel_dp->DP = I915_READ(intel_dp->output_reg);
        intel_dp->attached_connector = intel_connector;

        if (intel_dp_is_edp(dev, port))
                type = DRM_MODE_CONNECTOR_eDP;
        else
                type = DRM_MODE_CONNECTOR_DisplayPort;

        /*
         * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
         * for DP the encoder type can be set by the caller to
         * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
         */
        if (type == DRM_MODE_CONNECTOR_eDP)
                intel_encoder->type = INTEL_OUTPUT_EDP;

        /* eDP only on port B and/or C on vlv/chv */
        if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
                    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
                return false;

        DRM_DEBUG_KMS("Adding %s connector on port %c\n",
                      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
                      port_name(port));

        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

        connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;

        INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
                          edp_panel_vdd_work);

        intel_connector_attach_encoder(intel_connector, intel_encoder);
        drm_connector_register(connector);

        if (HAS_DDI(dev))
                intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
        else
                intel_connector->get_hw_state = intel_connector_get_hw_state;
        intel_connector->unregister = intel_dp_connector_unregister;

        /* Set up the hotplug pin. */
        switch (port) {
        case PORT_A:
                intel_encoder->hpd_pin = HPD_PORT_A;
                break;
        case PORT_B:
                intel_encoder->hpd_pin = HPD_PORT_B;
                if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
                        intel_encoder->hpd_pin = HPD_PORT_A;
                break;
        case PORT_C:
                intel_encoder->hpd_pin = HPD_PORT_C;
                break;
        case PORT_D:
                intel_encoder->hpd_pin = HPD_PORT_D;
                break;
        case PORT_E:
                intel_encoder->hpd_pin = HPD_PORT_E;
                break;
        default:
                BUG();
        }

        if (is_edp(intel_dp)) {
                pps_lock(intel_dp);
                intel_dp_init_panel_power_timestamps(intel_dp);
                if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                        vlv_initial_power_sequencer_setup(intel_dp);
                else
                        intel_dp_init_panel_power_sequencer(dev, intel_dp);
                pps_unlock(intel_dp);
        }

        ret = intel_dp_aux_init(intel_dp, intel_connector);
        if (ret)
                goto fail;

        /* init MST on ports that can support it */
        if (HAS_DP_MST(dev) &&
            (port == PORT_B || port == PORT_C || port == PORT_D))
                intel_dp_mst_encoder_init(intel_dig_port,
                                          intel_connector->base.base.id);

        if (!intel_edp_init_connector(intel_dp, intel_connector)) {
                intel_dp_aux_fini(intel_dp);
                intel_dp_mst_encoder_cleanup(intel_dig_port);
                goto fail;
        }

        intel_dp_add_properties(intel_dp, connector);

        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd. Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
         */
        if (IS_G4X(dev) && !IS_GM45(dev)) {
                u32 temp = I915_READ(PEG_BAND_GAP_DATA);
                I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
        }

        i915_debugfs_connector_add(connector);

        return true;
fail:
        if (is_edp(intel_dp)) {
                cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
                /*
                 * vdd might still be enabled due to the delayed vdd off.
                 * Make sure vdd is actually turned off here.
                 */
                pps_lock(intel_dp);
                edp_panel_vdd_off_sync(intel_dp);
                pps_unlock(intel_dp);
        }
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);

        return false;
}
bool intel_dp_init(struct drm_device *dev,
                   i915_reg_t output_reg,
                   enum port port)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;

        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
                return false;

        intel_connector = intel_connector_alloc();
        if (!intel_connector)
                goto err_connector_alloc;

        intel_encoder = &intel_dig_port->base;
        encoder = &intel_encoder->base;

        if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
                             DRM_MODE_ENCODER_TMDS, NULL))
                goto err_encoder_init;

        intel_encoder->compute_config = intel_dp_compute_config;
        intel_encoder->disable = intel_disable_dp;
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
        intel_encoder->suspend = intel_dp_encoder_suspend;
        if (IS_CHERRYVIEW(dev)) {
                intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
                intel_encoder->pre_enable = chv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->post_disable = chv_post_disable_dp;
                intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
        } else if (IS_VALLEYVIEW(dev)) {
                intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
                intel_encoder->pre_enable = vlv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                if (INTEL_INFO(dev)->gen >= 5)
                        intel_encoder->post_disable = ilk_post_disable_dp;
        }

        intel_dig_port->port = port;
        intel_dig_port->dp.output_reg = output_reg;
        intel_dig_port->max_lanes = 4;

        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        if (IS_CHERRYVIEW(dev)) {
                if (port == PORT_D)
                        intel_encoder->crtc_mask = 1 << 2;
                else
                        intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
        } else {
                intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        }
        intel_encoder->cloneable = 0;

        intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
        dev_priv->hotplug.irq_port[port] = intel_dig_port;

        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;

        return true;

err_init_connector:
        drm_encoder_cleanup(encoder);
err_encoder_init:
        kfree(intel_connector);
err_connector_alloc:
        kfree(intel_dig_port);
        return false;
}
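/*
 * Illustrative only, not part of this file: intel_dp_init() has no callers
 * here; the platform output setup in intel_display.c probes each DP
 * register and registers a port roughly as sketched below. The DP_B/PORT_B
 * pairing is an assumption for illustration; the actual register/port
 * pairs are platform specific.
 */
#if 0
        if (I915_READ(DP_B) & DP_DETECTED)
                intel_dp_init(dev, DP_B, PORT_B);
#endif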
#if 0
void intel_dp_mst_suspend(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        /* disable MST */
        for (i = 0; i < I915_MAX_PORTS; i++) {
                struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
                if (!intel_dig_port)
                        continue;

                if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
                        if (!intel_dig_port->dp.can_mst)
                                continue;
                        if (intel_dig_port->dp.is_mst)
                                drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
                }
        }
}
#endif
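/*
 * Note (not from the original source): upstream appears to drive this pair
 * of helpers from the driver suspend/resume path; in this port the suspend
 * half is compiled out above, so only the resume path below is live.
 */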
void intel_dp_mst_resume(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;

        for (i = 0; i < I915_MAX_PORTS; i++) {
                struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
                if (!intel_dig_port)
                        continue;
                if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
                        int ret;

                        if (!intel_dig_port->dp.can_mst)
                                continue;

                        ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
                        if (ret != 0) {
                                intel_dp_check_mst_status(&intel_dig_port->dp);