drm: Implement and use Linux struct device
[dragonfly.git] / sys/dev/drm/i915/intel_dp.c
blob 11effb7fe1e18a561b3ed60ef12fdae93d1045fe
1 /*
2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/export.h>
30 #include <linux/notifier.h>
31 #include <drm/drmP.h>
32 #include <linux/slab.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_crtc.h>
35 #include <drm/drm_crtc_helper.h>
36 #include <drm/drm_edid.h>
37 #include "intel_drv.h"
38 #include <drm/i915_drm.h>
39 #include "i915_drv.h"
41 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43 static int disable_aux_irq = 0;
44 TUNABLE_INT("drm.i915.disable_aux_irq", &disable_aux_irq);
46 /* Compliance test status bits */
47 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
48 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
50 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
52 struct dp_link_dpll {
53 int clock;
54 struct dpll dpll;
57 static const struct dp_link_dpll gen4_dpll[] = {
58 { 162000,
59 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
60 { 270000,
61 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
64 static const struct dp_link_dpll pch_dpll[] = {
65 { 162000,
66 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
67 { 270000,
68 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
71 static const struct dp_link_dpll vlv_dpll[] = {
72 { 162000,
73 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
74 { 270000,
75 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
79  * CHV supports eDP 1.4, which has more link rates.
80  * Only the fixed rates are provided below; the variable rates are excluded.
82 static const struct dp_link_dpll chv_dpll[] = {
84  * CHV requires programming fractional division for m2.
85 * m2 is stored in fixed point format using formula below
86 * (m2_int << 22) | m2_fraction
88 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
89 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
90 { 270000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
92 { 540000, /* m2_int = 27, m2_fraction = 0 */
93 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
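/*
 * Editor's sketch: a worked check of the m2 fixed-point encoding
 * described above, for the 162000 entry (m2_int = 32,
 * m2_fraction = 1677722):
 *
 *   (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a
 *
 * which matches the .m2 value in chv_dpll[0]. A hypothetical helper
 * (illustrative only, not part of the driver) would be:
 */
#if 0
static uint32_t chv_pack_m2(uint32_t m2_int, uint32_t m2_fraction)
{
	return (m2_int << 22) | m2_fraction;
}
#endif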
96 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
97 324000, 432000, 540000 };
98 static const int skl_rates[] = { 162000, 216000, 270000,
99 324000, 432000, 540000 };
100 static const int default_rates[] = { 162000, 270000, 540000 };
103 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
104 * @intel_dp: DP struct
106 * If a CPU or PCH DP output is attached to an eDP panel, this function
107 * will return true, and false otherwise.
109 static bool is_edp(struct intel_dp *intel_dp)
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
116 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
118 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
120 return intel_dig_port->base.base.dev;
123 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
125 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
128 static void intel_dp_link_down(struct intel_dp *intel_dp);
129 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
130 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
131 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
132 static void vlv_steal_power_sequencer(struct drm_device *dev,
133 enum i915_pipe pipe);
135 static unsigned int intel_dp_unused_lane_mask(int lane_count)
137 return ~((1 << lane_count) - 1) & 0xf;
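/*
 * Editor's sketch: for lane_count == 2 the expression above evaluates
 * to ~0b0011 & 0xf == 0b1100, i.e. lanes 2 and 3 are reported unused;
 * for lane_count == 4 it evaluates to 0x0, i.e. no unused lanes.
 */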
140 static int
141 intel_dp_max_link_bw(struct intel_dp *intel_dp)
143 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
145 switch (max_link_bw) {
146 case DP_LINK_BW_1_62:
147 case DP_LINK_BW_2_7:
148 case DP_LINK_BW_5_4:
149 break;
150 default:
151 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
152 max_link_bw);
153 max_link_bw = DP_LINK_BW_1_62;
154 break;
156 return max_link_bw;
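/*
 * Editor's note: the raw DP_MAX_LINK_RATE code encodes the link rate
 * in units of 0.27 Gbps, so the three codes accepted above map to:
 *
 *   DP_LINK_BW_1_62 (0x06) * 27000 == 162000 kHz
 *   DP_LINK_BW_2_7  (0x0a) * 27000 == 270000 kHz
 *   DP_LINK_BW_5_4  (0x14) * 27000 == 540000 kHz
 */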
159 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
161 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
162 struct drm_device *dev = intel_dig_port->base.base.dev;
163 u8 source_max, sink_max;
165 source_max = 4;
166 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
167 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
168 source_max = 2;
170 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
172 return min(source_max, sink_max);
176 * The units on the numbers in the next two are... bizarre. Examples will
177 * make it clearer; this one parallels an example in the eDP spec.
179 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
181 * 270000 * 1 * 8 / 10 == 216000
183 * The actual data capacity of that configuration is 2.16Gbit/s, so the
184 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
185 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
186 * 119000. At 18bpp that's 2142000 kilobits per second.
188 * Thus the strange-looking division by 10 in intel_dp_link_required, to
189 * get the result in decakilobits instead of kilobits.
192 static int
193 intel_dp_link_required(int pixel_clock, int bpp)
195 return (pixel_clock * bpp + 9) / 10;
198 static int
199 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
201 return (max_link_clock * max_lanes * 8) / 10;
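/*
 * Editor's sketch, extending the example in the comment above:
 * 1680x1050R at 18bpp needs
 *
 *   intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10
 *                                      == 214200 decakilobits,
 *
 * while a single 2.7GHz lane provides
 * intel_dp_max_data_rate(270000, 1) == 216000, so that mode just fits
 * on one lane.
 */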
204 static enum drm_mode_status
205 intel_dp_mode_valid(struct drm_connector *connector,
206 struct drm_display_mode *mode)
208 struct intel_dp *intel_dp = intel_attached_dp(connector);
209 struct intel_connector *intel_connector = to_intel_connector(connector);
210 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
211 int target_clock = mode->clock;
212 int max_rate, mode_rate, max_lanes, max_link_clock;
214 if (is_edp(intel_dp) && fixed_mode) {
215 if (mode->hdisplay > fixed_mode->hdisplay)
216 return MODE_PANEL;
218 if (mode->vdisplay > fixed_mode->vdisplay)
219 return MODE_PANEL;
221 target_clock = fixed_mode->clock;
224 max_link_clock = intel_dp_max_link_rate(intel_dp);
225 max_lanes = intel_dp_max_lane_count(intel_dp);
227 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
228 mode_rate = intel_dp_link_required(target_clock, 18);
230 if (mode_rate > max_rate)
231 return MODE_CLOCK_HIGH;
233 if (mode->clock < 10000)
234 return MODE_CLOCK_LOW;
236 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
237 return MODE_H_ILLEGAL;
239 return MODE_OK;
242 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
244 int i;
245 uint32_t v = 0;
247 if (src_bytes > 4)
248 src_bytes = 4;
249 for (i = 0; i < src_bytes; i++)
250 v |= ((uint32_t) src[i]) << ((3-i) * 8);
251 return v;
254 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
256 int i;
257 if (dst_bytes > 4)
258 dst_bytes = 4;
259 for (i = 0; i < dst_bytes; i++)
260 dst[i] = src >> ((3-i) * 8);
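/*
 * Editor's sketch: the pack/unpack helpers above are big-endian with
 * respect to the 32-bit AUX data registers. E.g. packing the two
 * bytes { 0x12, 0x34 } yields 0x12340000, and unpacking 0x12340000
 * into a two-byte buffer yields { 0x12, 0x34 } again.
 */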
263 static void
264 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
265 struct intel_dp *intel_dp);
266 static void
267 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
268 struct intel_dp *intel_dp);
270 static void pps_lock(struct intel_dp *intel_dp)
272 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
273 struct intel_encoder *encoder = &intel_dig_port->base;
274 struct drm_device *dev = encoder->base.dev;
275 struct drm_i915_private *dev_priv = dev->dev_private;
276 enum intel_display_power_domain power_domain;
279 * See vlv_power_sequencer_reset() for why we need
280 * a power domain reference here.
282 power_domain = intel_display_port_aux_power_domain(encoder);
283 intel_display_power_get(dev_priv, power_domain);
285 mutex_lock(&dev_priv->pps_mutex);
288 static void pps_unlock(struct intel_dp *intel_dp)
290 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
291 struct intel_encoder *encoder = &intel_dig_port->base;
292 struct drm_device *dev = encoder->base.dev;
293 struct drm_i915_private *dev_priv = dev->dev_private;
294 enum intel_display_power_domain power_domain;
296 mutex_unlock(&dev_priv->pps_mutex);
298 power_domain = intel_display_port_aux_power_domain(encoder);
299 intel_display_power_put(dev_priv, power_domain);
302 static void
303 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
305 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
306 struct drm_device *dev = intel_dig_port->base.base.dev;
307 struct drm_i915_private *dev_priv = dev->dev_private;
308 enum i915_pipe pipe = intel_dp->pps_pipe;
309 bool pll_enabled, release_cl_override = false;
310 enum dpio_phy phy = DPIO_PHY(pipe);
311 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
312 uint32_t DP;
314 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
315 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
316 pipe_name(pipe), port_name(intel_dig_port->port)))
317 return;
319 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
320 pipe_name(pipe), port_name(intel_dig_port->port));
322 /* Preserve the BIOS-computed detected bit. This is
323 * supposed to be read-only.
325 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
326 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
327 DP |= DP_PORT_WIDTH(1);
328 DP |= DP_LINK_TRAIN_PAT_1;
330 if (IS_CHERRYVIEW(dev))
331 DP |= DP_PIPE_SELECT_CHV(pipe);
332 else if (pipe == PIPE_B)
333 DP |= DP_PIPEB_SELECT;
335 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
338 * The DPLL for the pipe must be enabled for this to work.
339 * So enable it temporarily if it's not already enabled.
341 if (!pll_enabled) {
342 release_cl_override = IS_CHERRYVIEW(dev) &&
343 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
345 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
346 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
350 * Similar magic as in intel_dp_enable_port().
351 * We _must_ do this port enable + disable trick
352 * to make this power sequencer lock onto the port.
353 * Otherwise even VDD force bit won't work.
355 I915_WRITE(intel_dp->output_reg, DP);
356 POSTING_READ(intel_dp->output_reg);
358 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
359 POSTING_READ(intel_dp->output_reg);
361 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
362 POSTING_READ(intel_dp->output_reg);
364 if (!pll_enabled) {
365 vlv_force_pll_off(dev, pipe);
367 if (release_cl_override)
368 chv_phy_powergate_ch(dev_priv, phy, ch, false);
372 static enum i915_pipe
373 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
375 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
376 struct drm_device *dev = intel_dig_port->base.base.dev;
377 struct drm_i915_private *dev_priv = dev->dev_private;
378 struct intel_encoder *encoder;
379 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
380 enum i915_pipe pipe;
382 lockdep_assert_held(&dev_priv->pps_mutex);
384 /* We should never land here with regular DP ports */
385 WARN_ON(!is_edp(intel_dp));
387 if (intel_dp->pps_pipe != INVALID_PIPE)
388 return intel_dp->pps_pipe;
391 * We don't have a power sequencer currently.
392 * Pick one that's not used by other ports.
394 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
395 base.head) {
396 struct intel_dp *tmp;
398 if (encoder->type != INTEL_OUTPUT_EDP)
399 continue;
401 tmp = enc_to_intel_dp(&encoder->base);
403 if (tmp->pps_pipe != INVALID_PIPE)
404 pipes &= ~(1 << tmp->pps_pipe);
408 * Didn't find one. This should not happen since there
409 * are two power sequencers and up to two eDP ports.
411 if (WARN_ON(pipes == 0))
412 pipe = PIPE_A;
413 else
414 pipe = ffs(pipes) - 1;
416 vlv_steal_power_sequencer(dev, pipe);
417 intel_dp->pps_pipe = pipe;
419 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
420 pipe_name(intel_dp->pps_pipe),
421 port_name(intel_dig_port->port));
423 /* init power sequencer on this pipe and port */
424 intel_dp_init_panel_power_sequencer(dev, intel_dp);
425 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
428 * Even vdd force doesn't work until we've made
429 * the power sequencer lock in on the port.
431 vlv_power_sequencer_kick(intel_dp);
433 return intel_dp->pps_pipe;
436 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
437 enum i915_pipe pipe);
439 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
440 enum i915_pipe pipe)
442 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
445 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
446 enum i915_pipe pipe)
448 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
451 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
452 enum i915_pipe pipe)
454 return true;
457 static enum i915_pipe
458 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
459 enum port port,
460 vlv_pipe_check pipe_check)
462 enum i915_pipe pipe;
464 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
465 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
466 PANEL_PORT_SELECT_MASK;
468 if (port_sel != PANEL_PORT_SELECT_VLV(port))
469 continue;
471 if (!pipe_check(dev_priv, pipe))
472 continue;
474 return pipe;
477 return INVALID_PIPE;
480 static void
481 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
483 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
484 struct drm_device *dev = intel_dig_port->base.base.dev;
485 struct drm_i915_private *dev_priv = dev->dev_private;
486 enum port port = intel_dig_port->port;
488 lockdep_assert_held(&dev_priv->pps_mutex);
490 /* try to find a pipe with this port selected */
491 /* first pick one where the panel is on */
492 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
493 vlv_pipe_has_pp_on);
494 /* didn't find one? pick one where vdd is on */
495 if (intel_dp->pps_pipe == INVALID_PIPE)
496 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
497 vlv_pipe_has_vdd_on);
498 /* didn't find one? pick one with just the correct port */
499 if (intel_dp->pps_pipe == INVALID_PIPE)
500 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
501 vlv_pipe_any);
503 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
504 if (intel_dp->pps_pipe == INVALID_PIPE) {
505 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
506 port_name(port));
507 return;
510 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
511 port_name(port), pipe_name(intel_dp->pps_pipe));
513 intel_dp_init_panel_power_sequencer(dev, intel_dp);
514 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
517 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
519 struct drm_device *dev = dev_priv->dev;
520 struct intel_encoder *encoder;
522 if (WARN_ON(!IS_VALLEYVIEW(dev)))
523 return;
526 * We can't grab pps_mutex here due to deadlock with power_domain
527 * mutex when power_domain functions are called while holding pps_mutex.
528 * That also means that in order to use pps_pipe the code needs to
529 * hold both a power domain reference and pps_mutex, and the power domain
530 * reference get/put must be done while _not_ holding pps_mutex.
531 * pps_{lock,unlock}() do these steps in the correct order, so they
532 * should always be used.
535 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
536 struct intel_dp *intel_dp;
538 if (encoder->type != INTEL_OUTPUT_EDP)
539 continue;
541 intel_dp = enc_to_intel_dp(&encoder->base);
542 intel_dp->pps_pipe = INVALID_PIPE;
546 static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
548 struct drm_device *dev = intel_dp_to_dev(intel_dp);
550 if (IS_BROXTON(dev))
551 return BXT_PP_CONTROL(0);
552 else if (HAS_PCH_SPLIT(dev))
553 return PCH_PP_CONTROL;
554 else
555 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
558 static u32 _pp_stat_reg(struct intel_dp *intel_dp)
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
562 if (IS_BROXTON(dev))
563 return BXT_PP_STATUS(0);
564 else if (HAS_PCH_SPLIT(dev))
565 return PCH_PP_STATUS;
566 else
567 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
570 /* Reboot notifier handler to shut down panel power, to guarantee T12 timing.
571    This function is only applicable when the panel PM state is not tracked. */
572 #if 0
573 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
574 void *unused)
576 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
577 edp_notifier);
578 struct drm_device *dev = intel_dp_to_dev(intel_dp);
579 struct drm_i915_private *dev_priv = dev->dev_private;
581 if (!is_edp(intel_dp) || code != SYS_RESTART)
582 return 0;
584 pps_lock(intel_dp);
586 if (IS_VALLEYVIEW(dev)) {
587 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
588 u32 pp_ctrl_reg, pp_div_reg;
589 u32 pp_div;
591 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
592 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
593 pp_div = I915_READ(pp_div_reg);
594 pp_div &= PP_REFERENCE_DIVIDER_MASK;
596 /* 0x1F write to PP_DIV_REG sets max cycle delay */
597 I915_WRITE(pp_div_reg, pp_div | 0x1F);
598 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
599 msleep(intel_dp->panel_power_cycle_delay);
602 pps_unlock(intel_dp);
604 return 0;
606 #endif
608 static bool edp_have_panel_power(struct intel_dp *intel_dp)
610 struct drm_device *dev = intel_dp_to_dev(intel_dp);
611 struct drm_i915_private *dev_priv = dev->dev_private;
613 lockdep_assert_held(&dev_priv->pps_mutex);
615 if (IS_VALLEYVIEW(dev) &&
616 intel_dp->pps_pipe == INVALID_PIPE)
617 return false;
619 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
622 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
624 struct drm_device *dev = intel_dp_to_dev(intel_dp);
625 struct drm_i915_private *dev_priv = dev->dev_private;
627 lockdep_assert_held(&dev_priv->pps_mutex);
629 if (IS_VALLEYVIEW(dev) &&
630 intel_dp->pps_pipe == INVALID_PIPE)
631 return false;
633 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
636 static void
637 intel_dp_check_edp(struct intel_dp *intel_dp)
639 struct drm_device *dev = intel_dp_to_dev(intel_dp);
640 struct drm_i915_private *dev_priv = dev->dev_private;
642 if (!is_edp(intel_dp))
643 return;
645 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
646 WARN(1, "eDP powered off while attempting aux channel communication.\n");
647 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
648 I915_READ(_pp_stat_reg(intel_dp)),
649 I915_READ(_pp_ctrl_reg(intel_dp)));
653 static uint32_t
654 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
656 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
657 struct drm_device *dev = intel_dig_port->base.base.dev;
658 struct drm_i915_private *dev_priv = dev->dev_private;
659 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
660 uint32_t status;
661 bool done;
663 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
664 if (has_aux_irq)
665 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
666 msecs_to_jiffies_timeout(10));
667 else
668 done = wait_for_atomic(C, 10) == 0;
669 if (!done)
670 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
671 has_aux_irq);
672 #undef C
674 return status;
677 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
679 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
680 struct drm_device *dev = intel_dig_port->base.base.dev;
683 * The clock divider is based off the hrawclk, and we want it to run at
684 * 2MHz. So take the hrawclk value, divide by 2, and use that.
686 return index ? 0 : intel_hrawclk(dev) / 2;
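/*
 * Editor's sketch, assuming intel_hrawclk() reports the raw clock in
 * MHz: with a 200MHz hrawclk the divider above is 200 / 2 == 100, and
 * 200MHz / 100 gives the desired 2MHz AUX clock.
 */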
689 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
691 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692 struct drm_device *dev = intel_dig_port->base.base.dev;
693 struct drm_i915_private *dev_priv = dev->dev_private;
695 if (index)
696 return 0;
698 if (intel_dig_port->port == PORT_A) {
699 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
701 } else {
702 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
706 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
708 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
709 struct drm_device *dev = intel_dig_port->base.base.dev;
710 struct drm_i915_private *dev_priv = dev->dev_private;
712 if (intel_dig_port->port == PORT_A) {
713 if (index)
714 return 0;
715 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
716 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
717 /* Workaround for non-ULT HSW */
718 switch (index) {
719 case 0: return 63;
720 case 1: return 72;
721 default: return 0;
723 } else {
724 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
728 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
730 return index ? 0 : 100;
733 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
736 * SKL doesn't need us to program the AUX clock divider (the hardware will
737 * derive the clock from CDCLK automatically). We still implement the
738 * get_aux_clock_divider vfunc to plug into the existing code.
740 return index ? 0 : 1;
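/*
 * Editor's note: the get_aux_clock_divider() hooks are called with an
 * increasing index until they return 0 (see the while loop in
 * intel_dp_aux_ch() below), so platforms with a single divider return
 * it at index 0 and 0 for every later index, while
 * hsw_get_aux_clock_divider() can expose two dividers for the non-ULT
 * HSW workaround.
 */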
743 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
744 bool has_aux_irq,
745 int send_bytes,
746 uint32_t aux_clock_divider)
748 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
749 struct drm_device *dev = intel_dig_port->base.base.dev;
750 uint32_t precharge, timeout;
752 if (IS_GEN6(dev))
753 precharge = 3;
754 else
755 precharge = 5;
757 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
758 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
759 else
760 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
762 return DP_AUX_CH_CTL_SEND_BUSY |
763 DP_AUX_CH_CTL_DONE |
764 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
765 DP_AUX_CH_CTL_TIME_OUT_ERROR |
766 timeout |
767 DP_AUX_CH_CTL_RECEIVE_ERROR |
768 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
769 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
770 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
773 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
774 bool has_aux_irq,
775 int send_bytes,
776 uint32_t unused)
778 return DP_AUX_CH_CTL_SEND_BUSY |
779 DP_AUX_CH_CTL_DONE |
780 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
781 DP_AUX_CH_CTL_TIME_OUT_ERROR |
782 DP_AUX_CH_CTL_TIME_OUT_1600us |
783 DP_AUX_CH_CTL_RECEIVE_ERROR |
784 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
785 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
788 static int
789 intel_dp_aux_ch(struct intel_dp *intel_dp,
790 const uint8_t *send, int send_bytes,
791 uint8_t *recv, int recv_size)
793 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
794 struct drm_device *dev = intel_dig_port->base.base.dev;
795 struct drm_i915_private *dev_priv = dev->dev_private;
796 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
797 uint32_t ch_data = ch_ctl + 4;
798 uint32_t aux_clock_divider;
799 int i, ret, recv_bytes;
800 uint32_t status;
801 int try, clock = 0;
802 #ifdef __DragonFly__
803 bool has_aux_irq = HAS_AUX_IRQ(dev) && !disable_aux_irq;
804 #else
805 bool has_aux_irq = HAS_AUX_IRQ(dev);
806 #endif
807 bool vdd;
809 pps_lock(intel_dp);
812 * We will be called with VDD already enabled for dpcd/edid/oui reads.
813 * In such cases we want to leave VDD enabled and it's up to upper layers
814 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
815 * ourselves.
817 vdd = edp_panel_vdd_on(intel_dp);
819 /* dp aux is extremely sensitive to irq latency, hence request the
820 * lowest possible wakeup latency and so prevent the cpu from going into
821 * deep sleep states.
823 pm_qos_update_request(&dev_priv->pm_qos, 0);
825 intel_dp_check_edp(intel_dp);
827 /* Try to wait for any previous AUX channel activity */
828 for (try = 0; try < 3; try++) {
829 status = I915_READ_NOTRACE(ch_ctl);
830 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
831 break;
832 msleep(1);
835 if (try == 3) {
836 static u32 last_status = -1;
837 const u32 status = I915_READ(ch_ctl);
839 if (status != last_status) {
840 WARN(1, "dp_aux_ch not started status 0x%08x\n",
841 status);
842 last_status = status;
845 ret = -EBUSY;
846 goto out;
849 /* Only 5 data registers! */
850 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
851 ret = -E2BIG;
852 goto out;
855 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
856 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
857 has_aux_irq,
858 send_bytes,
859 aux_clock_divider);
861 /* Must try at least 3 times according to DP spec */
862 for (try = 0; try < 5; try++) {
863 /* Load the send data into the aux channel data registers */
864 for (i = 0; i < send_bytes; i += 4)
865 I915_WRITE(ch_data + i,
866 intel_dp_pack_aux(send + i,
867 send_bytes - i));
869 /* Send the command and wait for it to complete */
870 I915_WRITE(ch_ctl, send_ctl);
872 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
874 /* Clear done status and any errors */
875 I915_WRITE(ch_ctl,
876 status |
877 DP_AUX_CH_CTL_DONE |
878 DP_AUX_CH_CTL_TIME_OUT_ERROR |
879 DP_AUX_CH_CTL_RECEIVE_ERROR);
881 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
882 continue;
884 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
885 * a 400us delay is required after errors and timeouts.
886 * Timeout errors from the HW already meet this
887 * requirement, so skip to the next iteration.
889 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
890 usleep_range(400, 500);
891 continue;
893 if (status & DP_AUX_CH_CTL_DONE)
894 goto done;
898 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
899 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
900 ret = -EBUSY;
901 goto out;
904 done:
905 /* Check for timeout or receive error.
906 * Timeouts occur when the sink is not connected.
908 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
909 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
910 ret = -EIO;
911 goto out;
914 /* Timeouts occur when the device isn't connected, so they're
915 * "normal" -- don't fill the kernel log with these */
916 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
917 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
918 ret = -ETIMEDOUT;
919 goto out;
922 /* Unload any bytes sent back from the other side */
923 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
924 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
925 if (recv_bytes > recv_size)
926 recv_bytes = recv_size;
928 for (i = 0; i < recv_bytes; i += 4)
929 intel_dp_unpack_aux(I915_READ(ch_data + i),
930 recv + i, recv_bytes - i);
932 ret = recv_bytes;
933 out:
934 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
936 if (vdd)
937 edp_panel_vdd_off(intel_dp, false);
939 pps_unlock(intel_dp);
941 return ret;
944 #define BARE_ADDRESS_SIZE 3
945 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
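/*
 * Editor's sketch: the 4-byte native/i2c AUX header built below is
 * laid out as
 *
 *   txbuf[0] = request[3:0] << 4 | address[19:16]
 *   txbuf[1] = address[15:8]
 *   txbuf[2] = address[7:0]
 *   txbuf[3] = size - 1	(omitted for address-only transactions)
 *
 * which is why HEADER_SIZE is one byte more than BARE_ADDRESS_SIZE.
 */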
946 static ssize_t
947 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
949 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
950 uint8_t txbuf[20], rxbuf[20];
951 size_t txsize, rxsize;
952 int ret;
954 txbuf[0] = (msg->request << 4) |
955 ((msg->address >> 16) & 0xf);
956 txbuf[1] = (msg->address >> 8) & 0xff;
957 txbuf[2] = msg->address & 0xff;
958 txbuf[3] = msg->size - 1;
960 switch (msg->request & ~DP_AUX_I2C_MOT) {
961 case DP_AUX_NATIVE_WRITE:
962 case DP_AUX_I2C_WRITE:
963 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
964 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
965 rxsize = 2; /* 0 or 1 data bytes */
967 if (WARN_ON(txsize > 20))
968 return -E2BIG;
970 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
972 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
973 if (ret > 0) {
974 msg->reply = rxbuf[0] >> 4;
976 if (ret > 1) {
977 /* Number of bytes written in a short write. */
978 ret = clamp_t(int, rxbuf[1], 0, msg->size);
979 } else {
980 /* Return payload size. */
981 ret = msg->size;
984 break;
986 case DP_AUX_NATIVE_READ:
987 case DP_AUX_I2C_READ:
988 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
989 rxsize = msg->size + 1;
991 if (WARN_ON(rxsize > 20))
992 return -E2BIG;
994 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
995 if (ret > 0) {
996 msg->reply = rxbuf[0] >> 4;
998 * Assume happy day, and copy the data. The caller is
999 * expected to check msg->reply before touching it.
1001 * Return payload size.
1003 ret--;
1004 memcpy(msg->buffer, rxbuf + 1, ret);
1006 break;
1008 default:
1009 ret = -EINVAL;
1010 break;
1013 return ret;
1016 static int
1017 intel_dp_i2c_aux_ch(device_t adapter, int mode,
1018 uint8_t write_byte, uint8_t *read_byte)
1020 struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
1021 struct intel_dp *intel_dp = data->priv;
1022 uint16_t address = data->address;
1023 uint8_t msg[5];
1024 uint8_t reply[2];
1025 unsigned retry;
1026 int msg_bytes;
1027 int reply_bytes;
1028 int ret;
1030 intel_edp_panel_vdd_on(intel_dp);
1031 intel_dp_check_edp(intel_dp);
1032 /* Set up the command byte */
1033 if (mode & MODE_I2C_READ)
1034 msg[0] = DP_AUX_I2C_READ << 4;
1035 else
1036 msg[0] = DP_AUX_I2C_WRITE << 4;
1038 if (!(mode & MODE_I2C_STOP))
1039 msg[0] |= DP_AUX_I2C_MOT << 4;
1041 msg[1] = address >> 8;
1042 msg[2] = address;
1044 switch (mode) {
1045 case MODE_I2C_WRITE:
1046 msg[3] = 0;
1047 msg[4] = write_byte;
1048 msg_bytes = 5;
1049 reply_bytes = 1;
1050 break;
1051 case MODE_I2C_READ:
1052 msg[3] = 0;
1053 msg_bytes = 4;
1054 reply_bytes = 2;
1055 break;
1056 default:
1057 msg_bytes = 3;
1058 reply_bytes = 1;
1059 break;
1063 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
1064 * required to retry at least seven times upon receiving AUX_DEFER
1065 * before giving up the AUX transaction.
1067 for (retry = 0; retry < 7; retry++) {
1068 ret = intel_dp_aux_ch(intel_dp,
1069 msg, msg_bytes,
1070 reply, reply_bytes);
1071 if (ret < 0) {
1072 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
1073 goto out;
1076 switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
1077 case DP_AUX_NATIVE_REPLY_ACK:
1078 /* I2C-over-AUX Reply field is only valid
1079 * when paired with AUX ACK.
1081 break;
1082 case DP_AUX_NATIVE_REPLY_NACK:
1083 DRM_DEBUG_KMS("aux_ch native nack\n");
1084 ret = -EREMOTEIO;
1085 goto out;
1086 case DP_AUX_NATIVE_REPLY_DEFER:
1088 * For now, just give more slack to branch devices. We
1089 * could check the DPCD for I2C bit rate capabilities,
1090 * and if available, adjust the interval. We could also
1091 * be more careful with DP-to-Legacy adapters where a
1092 * long legacy cable may force very low I2C bit rates.
1094 if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
1095 DP_DWN_STRM_PORT_PRESENT)
1096 usleep_range(500, 600);
1097 else
1098 usleep_range(300, 400);
1099 continue;
1100 default:
1101 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
1102 reply[0]);
1103 ret = -EREMOTEIO;
1104 goto out;
1107 switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
1108 case DP_AUX_I2C_REPLY_ACK:
1109 if (mode == MODE_I2C_READ) {
1110 *read_byte = reply[1];
1112 ret = 0; /* reply_bytes - 1 */
1113 goto out;
1114 case DP_AUX_I2C_REPLY_NACK:
1115 DRM_DEBUG_KMS("aux_i2c nack\n");
1116 ret = -EREMOTEIO;
1117 goto out;
1118 case DP_AUX_I2C_REPLY_DEFER:
1119 DRM_DEBUG_KMS("aux_i2c defer\n");
1120 udelay(100);
1121 break;
1122 default:
1123 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
1124 ret = -EREMOTEIO;
1125 goto out;
1129 DRM_ERROR("too many retries, giving up\n");
1130 ret = -EREMOTEIO;
1132 out:
1133 return ret;
1136 static void
1137 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1139 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1140 struct drm_i915_private *dev_priv = dev->dev_private;
1141 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1142 enum port port = intel_dig_port->port;
1143 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1144 const char *name = NULL;
1145 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1146 int ret;
1148 /* On SKL we don't have Aux for port E so we rely on VBT to set
1149 * a proper alternate aux channel.
1151 if (IS_SKYLAKE(dev) && port == PORT_E) {
1152 switch (info->alternate_aux_channel) {
1153 case DP_AUX_B:
1154 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1155 break;
1156 case DP_AUX_C:
1157 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1158 break;
1159 case DP_AUX_D:
1160 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1161 break;
1162 case DP_AUX_A:
1163 default:
1164 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1168 switch (port) {
1169 case PORT_A:
1170 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1171 name = "DPDDC-A";
1172 break;
1173 case PORT_B:
1174 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1175 name = "DPDDC-B";
1176 break;
1177 case PORT_C:
1178 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1179 name = "DPDDC-C";
1180 break;
1181 case PORT_D:
1182 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1183 name = "DPDDC-D";
1184 break;
1185 case PORT_E:
1186 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1187 name = "DPDDC-E";
1188 break;
1189 default:
1190 BUG();
1194 * The AUX_CTL register is usually DP_CTL + 0x10.
1196 * On Haswell and Broadwell though:
1197 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1198 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1200 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1202 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1203 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1205 intel_dp->aux.name = name;
1206 intel_dp->aux.dev = dev->dev;
1207 intel_dp->aux.transfer = intel_dp_aux_transfer;
1209 DRM_DEBUG_KMS("i2c_init %s\n", name);
1211 ret = iic_dp_aux_add_bus(connector->base.dev->dev->bsddev, name,
1212 intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
1213 &intel_dp->aux.ddc);
1214 WARN(ret, "intel_dp_i2c_init failed with error %d for port %c\n",
1215 ret, port_name(port));
1217 #if 0
1218 ret = sysfs_create_link(&connector->base.kdev->kobj,
1219 &intel_dp->aux.ddc.dev.kobj,
1220 intel_dp->aux.ddc.dev.kobj.name);
1221 if (ret < 0) {
1222 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1223 drm_dp_aux_unregister(&intel_dp->aux);
1225 #endif
1228 static void
1229 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1231 #if 0
1232 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1234 if (!intel_connector->mst_port)
1235 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1236 intel_dp->aux.ddc.dev.kobj.name);
1237 #endif
1238 intel_connector_unregister(intel_connector);
1241 static void
1242 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1244 u32 ctrl1;
1246 memset(&pipe_config->dpll_hw_state, 0,
1247 sizeof(pipe_config->dpll_hw_state));
1249 pipe_config->ddi_pll_sel = SKL_DPLL0;
1250 pipe_config->dpll_hw_state.cfgcr1 = 0;
1251 pipe_config->dpll_hw_state.cfgcr2 = 0;
1253 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1254 switch (pipe_config->port_clock / 2) {
1255 case 81000:
1256 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1257 SKL_DPLL0);
1258 break;
1259 case 135000:
1260 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1261 SKL_DPLL0);
1262 break;
1263 case 270000:
1264 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1265 SKL_DPLL0);
1266 break;
1267 case 162000:
1268 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1269 SKL_DPLL0);
1270 break;
1271 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
1272 results in a CDCLK change. Need to handle the change of CDCLK by
1273 disabling pipes and re-enabling them. */
1274 case 108000:
1275 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1276 SKL_DPLL0);
1277 break;
1278 case 216000:
1279 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1280 SKL_DPLL0);
1281 break;
1284 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1287 void
1288 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1290 memset(&pipe_config->dpll_hw_state, 0,
1291 sizeof(pipe_config->dpll_hw_state));
1293 switch (pipe_config->port_clock / 2) {
1294 case 81000:
1295 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1296 break;
1297 case 135000:
1298 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1299 break;
1300 case 270000:
1301 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1302 break;
1306 static int
1307 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1309 if (intel_dp->num_sink_rates) {
1310 *sink_rates = intel_dp->sink_rates;
1311 return intel_dp->num_sink_rates;
1314 *sink_rates = default_rates;
1316 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
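/*
 * Editor's note on the shift above: DP_LINK_BW_1_62 (0x06),
 * DP_LINK_BW_2_7 (0x0a) and DP_LINK_BW_5_4 (0x14) shift down to 0, 1
 * and 2 respectively, so ">> 3" turns the bw code into an index into
 * default_rates[] and "+ 1" turns that maximum index into a length.
 */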
1319 static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
1321 /* WaDisableHBR2:skl */
1322 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1323 return false;
1325 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1326 (INTEL_INFO(dev)->gen >= 9))
1327 return true;
1328 else
1329 return false;
1332 static int
1333 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1335 int size;
1337 if (IS_BROXTON(dev)) {
1338 *source_rates = bxt_rates;
1339 size = ARRAY_SIZE(bxt_rates);
1340 } else if (IS_SKYLAKE(dev)) {
1341 *source_rates = skl_rates;
1342 size = ARRAY_SIZE(skl_rates);
1343 } else {
1344 *source_rates = default_rates;
1345 size = ARRAY_SIZE(default_rates);
1348 /* This depends on the fact that 5.4 is the last value in the array */
1349 if (!intel_dp_source_supports_hbr2(dev))
1350 size--;
1352 return size;
1355 static void
1356 intel_dp_set_clock(struct intel_encoder *encoder,
1357 struct intel_crtc_state *pipe_config)
1359 struct drm_device *dev = encoder->base.dev;
1360 const struct dp_link_dpll *divisor = NULL;
1361 int i, count = 0;
1363 if (IS_G4X(dev)) {
1364 divisor = gen4_dpll;
1365 count = ARRAY_SIZE(gen4_dpll);
1366 } else if (HAS_PCH_SPLIT(dev)) {
1367 divisor = pch_dpll;
1368 count = ARRAY_SIZE(pch_dpll);
1369 } else if (IS_CHERRYVIEW(dev)) {
1370 divisor = chv_dpll;
1371 count = ARRAY_SIZE(chv_dpll);
1372 } else if (IS_VALLEYVIEW(dev)) {
1373 divisor = vlv_dpll;
1374 count = ARRAY_SIZE(vlv_dpll);
1377 if (divisor && count) {
1378 for (i = 0; i < count; i++) {
1379 if (pipe_config->port_clock == divisor[i].clock) {
1380 pipe_config->dpll = divisor[i].dpll;
1381 pipe_config->clock_set = true;
1382 break;
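/*
 * Editor's sketch: intersect_rates() below is a classic two-pointer
 * merge intersection and relies on both input arrays being sorted in
 * ascending order, as the rate tables above are. For example,
 * intersecting { 162000, 270000, 540000 } with { 162000, 216000,
 * 270000 } yields { 162000, 270000 } and returns 2.
 */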
1388 static int intersect_rates(const int *source_rates, int source_len,
1389 const int *sink_rates, int sink_len,
1390 int *common_rates)
1392 int i = 0, j = 0, k = 0;
1394 while (i < source_len && j < sink_len) {
1395 if (source_rates[i] == sink_rates[j]) {
1396 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1397 return k;
1398 common_rates[k] = source_rates[i];
1399 ++k;
1400 ++i;
1401 ++j;
1402 } else if (source_rates[i] < sink_rates[j]) {
1403 ++i;
1404 } else {
1405 ++j;
1408 return k;
1411 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1412 int *common_rates)
1414 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1415 const int *source_rates, *sink_rates;
1416 int source_len, sink_len;
1418 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1419 source_len = intel_dp_source_rates(dev, &source_rates);
1421 return intersect_rates(source_rates, source_len,
1422 sink_rates, sink_len,
1423 common_rates);
1426 static void snprintf_int_array(char *str, size_t len,
1427 const int *array, int nelem)
1429 int i;
1431 str[0] = '\0';
1433 for (i = 0; i < nelem; i++) {
1434 int r = ksnprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1435 if (r >= len)
1436 return;
1437 str += r;
1438 len -= r;
1442 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1444 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1445 const int *source_rates, *sink_rates;
1446 int source_len, sink_len, common_len;
1447 int common_rates[DP_MAX_SUPPORTED_RATES];
1448 char str[128]; /* FIXME: too big for stack? */
1450 if ((drm_debug & DRM_UT_KMS) == 0)
1451 return;
1453 source_len = intel_dp_source_rates(dev, &source_rates);
1454 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1455 DRM_DEBUG_KMS("source rates: %s\n", str);
1457 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1458 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1459 DRM_DEBUG_KMS("sink rates: %s\n", str);
1461 common_len = intel_dp_common_rates(intel_dp, common_rates);
1462 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1463 DRM_DEBUG_KMS("common rates: %s\n", str);
1466 static int rate_to_index(int find, const int *rates)
1468 int i = 0;
1470 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1471 if (find == rates[i])
1472 break;
1474 return i;
1478 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1480 int rates[DP_MAX_SUPPORTED_RATES] = {};
1481 int len;
1483 len = intel_dp_common_rates(intel_dp, rates);
1484 if (WARN_ON(len <= 0))
1485 return 162000;
1487 return rates[rate_to_index(0, rates) - 1];
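/*
 * Editor's note: the rates[] array above is zero-initialized and
 * filled in ascending order, so rate_to_index(0, rates) finds the
 * first unused slot, i.e. the number of valid entries, and
 * rates[len - 1] is the highest common rate.
 */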
1490 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1492 return rate_to_index(rate, intel_dp->sink_rates);
1495 static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1496 uint8_t *link_bw, uint8_t *rate_select)
1498 if (intel_dp->num_sink_rates) {
1499 *link_bw = 0;
1500 *rate_select =
1501 intel_dp_rate_select(intel_dp, port_clock);
1502 } else {
1503 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1504 *rate_select = 0;
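/*
 * Editor's note: sinks that expose a rate table (eDP 1.4+) are
 * programmed through DP_LINK_RATE_SET with link_bw forced to 0, while
 * legacy sinks get the classic bw code, e.g. a 270000 kHz port clock
 * becomes link_bw 0x0a (DP_LINK_BW_2_7) with rate_select 0.
 */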
1508 bool
1509 intel_dp_compute_config(struct intel_encoder *encoder,
1510 struct intel_crtc_state *pipe_config)
1512 struct drm_device *dev = encoder->base.dev;
1513 struct drm_i915_private *dev_priv = dev->dev_private;
1514 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1515 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1516 enum port port = dp_to_dig_port(intel_dp)->port;
1517 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1518 struct intel_connector *intel_connector = intel_dp->attached_connector;
1519 int lane_count, clock;
1520 int min_lane_count = 1;
1521 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1522 /* Conveniently, the link BW constants become indices with a shift...*/
1523 int min_clock = 0;
1524 int max_clock;
1525 int bpp, mode_rate;
1526 int link_avail, link_clock;
1527 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1528 int common_len;
1529 uint8_t link_bw, rate_select;
1531 common_len = intel_dp_common_rates(intel_dp, common_rates);
1533 /* No common link rates between source and sink */
1534 WARN_ON(common_len <= 0);
1536 max_clock = common_len - 1;
1538 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1539 pipe_config->has_pch_encoder = true;
1541 pipe_config->has_dp_encoder = true;
1542 pipe_config->has_drrs = false;
1543 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1545 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1546 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1547 adjusted_mode);
1549 if (INTEL_INFO(dev)->gen >= 9) {
1550 int ret;
1551 ret = skl_update_scaler_crtc(pipe_config);
1552 if (ret)
1553 return ret;
1556 if (!HAS_PCH_SPLIT(dev))
1557 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1558 intel_connector->panel.fitting_mode);
1559 else
1560 intel_pch_panel_fitting(intel_crtc, pipe_config,
1561 intel_connector->panel.fitting_mode);
1564 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1565 return false;
1567 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1568 "max bw %d pixel clock %iKHz\n",
1569 max_lane_count, common_rates[max_clock],
1570 adjusted_mode->crtc_clock);
1572 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1573 * bpc in between. */
1574 bpp = pipe_config->pipe_bpp;
1575 if (is_edp(intel_dp)) {
1577 /* Get bpp from vbt only for panels that don't have bpp in edid */
1578 if (intel_connector->base.display_info.bpc == 0 &&
1579 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1580 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1581 dev_priv->vbt.edp_bpp);
1582 bpp = dev_priv->vbt.edp_bpp;
1586 * Use the maximum clock and number of lanes the eDP panel
1587 * advertises being capable of. The panels are generally
1588 * designed to support only a single clock and lane
1589 * configuration, and typically these values correspond to the
1590 * native resolution of the panel.
1592 min_lane_count = max_lane_count;
1593 min_clock = max_clock;
1596 for (; bpp >= 6*3; bpp -= 2*3) {
1597 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1598 bpp);
1600 for (clock = min_clock; clock <= max_clock; clock++) {
1601 for (lane_count = min_lane_count;
1602 lane_count <= max_lane_count;
1603 lane_count <<= 1) {
1605 link_clock = common_rates[clock];
1606 link_avail = intel_dp_max_data_rate(link_clock,
1607 lane_count);
1609 if (mode_rate <= link_avail) {
1610 goto found;
1616 return false;
1618 found:
1619 if (intel_dp->color_range_auto) {
1621 * See:
1622 * CEA-861-E - 5.1 Default Encoding Parameters
1623 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1625 pipe_config->limited_color_range =
1626 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1627 } else {
1628 pipe_config->limited_color_range =
1629 intel_dp->limited_color_range;
1632 pipe_config->lane_count = lane_count;
1634 pipe_config->pipe_bpp = bpp;
1635 pipe_config->port_clock = common_rates[clock];
1637 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1638 &link_bw, &rate_select);
1640 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1641 link_bw, rate_select, pipe_config->lane_count,
1642 pipe_config->port_clock, bpp);
1643 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1644 mode_rate, link_avail);
1646 intel_link_compute_m_n(bpp, lane_count,
1647 adjusted_mode->crtc_clock,
1648 pipe_config->port_clock,
1649 &pipe_config->dp_m_n);
1651 if (intel_connector->panel.downclock_mode != NULL &&
1652 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1653 pipe_config->has_drrs = true;
1654 intel_link_compute_m_n(bpp, lane_count,
1655 intel_connector->panel.downclock_mode->clock,
1656 pipe_config->port_clock,
1657 &pipe_config->dp_m2_n2);
1660 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1661 skl_edp_set_pll_config(pipe_config);
1662 else if (IS_BROXTON(dev))
1663 /* handled in ddi */;
1664 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1665 hsw_dp_set_ddi_pll_sel(pipe_config);
1666 else
1667 intel_dp_set_clock(encoder, pipe_config);
1669 return true;
1672 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1674 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1675 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1676 struct drm_device *dev = crtc->base.dev;
1677 struct drm_i915_private *dev_priv = dev->dev_private;
1678 u32 dpa_ctl;
1680 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1681 crtc->config->port_clock);
1682 dpa_ctl = I915_READ(DP_A);
1683 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1685 if (crtc->config->port_clock == 162000) {
1686 /* For a long time we've carried around an ILK-DevA w/a for the
1687 * 160MHz clock. If we're really unlucky, it's still required.
1689 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1690 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1691 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1692 } else {
1693 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1694 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1697 I915_WRITE(DP_A, dpa_ctl);
1699 POSTING_READ(DP_A);
1700 udelay(500);
1703 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1704 const struct intel_crtc_state *pipe_config)
1706 intel_dp->link_rate = pipe_config->port_clock;
1707 intel_dp->lane_count = pipe_config->lane_count;
1710 static void intel_dp_prepare(struct intel_encoder *encoder)
1712 struct drm_device *dev = encoder->base.dev;
1713 struct drm_i915_private *dev_priv = dev->dev_private;
1714 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1715 enum port port = dp_to_dig_port(intel_dp)->port;
1716 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1717 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1719 intel_dp_set_link_params(intel_dp, crtc->config);
1722 * There are four kinds of DP registers:
1724 * IBX PCH
1725 * SNB CPU
1726 * IVB CPU
1727 * CPT PCH
1729 * IBX PCH and CPU are the same for almost everything,
1730 * except that the CPU DP PLL is configured in this
1731 * register
1733 * CPT PCH is quite different, having many bits moved
1734 * to the TRANS_DP_CTL register instead. That
1735 * configuration happens (oddly) in ironlake_pch_enable
1738 /* Preserve the BIOS-computed detected bit. This is
1739 * supposed to be read-only.
1741 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1743 /* Handle DP bits in common between all three register formats */
1744 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1745 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1747 if (crtc->config->has_audio)
1748 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1750 /* Split out the IBX/CPU vs CPT settings */
1752 if (IS_GEN7(dev) && port == PORT_A) {
1753 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1754 intel_dp->DP |= DP_SYNC_HS_HIGH;
1755 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1756 intel_dp->DP |= DP_SYNC_VS_HIGH;
1757 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1759 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1760 intel_dp->DP |= DP_ENHANCED_FRAMING;
1762 intel_dp->DP |= crtc->pipe << 29;
1763 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1764 u32 trans_dp;
1766 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1768 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1769 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1770 trans_dp |= TRANS_DP_ENH_FRAMING;
1771 else
1772 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1773 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1774 } else {
1775 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1776 crtc->config->limited_color_range)
1777 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1779 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1780 intel_dp->DP |= DP_SYNC_HS_HIGH;
1781 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1782 intel_dp->DP |= DP_SYNC_VS_HIGH;
1783 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1785 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1786 intel_dp->DP |= DP_ENHANCED_FRAMING;
1788 if (IS_CHERRYVIEW(dev))
1789 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1790 else if (crtc->pipe == PIPE_B)
1791 intel_dp->DP |= DP_PIPEB_SELECT;
1795 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1796 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1798 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1799 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1801 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1802 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1804 static void wait_panel_status(struct intel_dp *intel_dp,
1805 u32 mask,
1806 u32 value)
1808 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1809 struct drm_i915_private *dev_priv = dev->dev_private;
1810 u32 pp_stat_reg, pp_ctrl_reg;
1812 lockdep_assert_held(&dev_priv->pps_mutex);
1814 pp_stat_reg = _pp_stat_reg(intel_dp);
1815 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1817 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1818 mask, value,
1819 I915_READ(pp_stat_reg),
1820 I915_READ(pp_ctrl_reg));
1822 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1823 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1824 I915_READ(pp_stat_reg),
1825 I915_READ(pp_ctrl_reg));
1828 DRM_DEBUG_KMS("Wait complete\n");
1831 static void wait_panel_on(struct intel_dp *intel_dp)
1833 DRM_DEBUG_KMS("Wait for panel power on\n");
1834 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1837 static void wait_panel_off(struct intel_dp *intel_dp)
1839 DRM_DEBUG_KMS("Wait for panel power off time\n");
1840 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1843 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1845 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1847 /* When we disable the VDD override bit last we have to do the manual
1848 * wait. */
1849 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1850 intel_dp->panel_power_cycle_delay);
1852 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1855 static void wait_backlight_on(struct intel_dp *intel_dp)
1857 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1858 intel_dp->backlight_on_delay);
1861 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1863 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1864 intel_dp->backlight_off_delay);
1867 /* Read the current pp_control value, unlocking the register if it
1868 * is locked
1871 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1873 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1874 struct drm_i915_private *dev_priv = dev->dev_private;
1875 u32 control;
1877 lockdep_assert_held(&dev_priv->pps_mutex);
1879 control = I915_READ(_pp_ctrl_reg(intel_dp));
1880 if (!IS_BROXTON(dev)) {
1881 control &= ~PANEL_UNLOCK_MASK;
1882 control |= PANEL_UNLOCK_REGS;
1884 return control;
1888 * Must be paired with edp_panel_vdd_off().
1889 * Must hold pps_mutex around the whole on/off sequence.
1890 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1892 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1894 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1895 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1896 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1897 struct drm_i915_private *dev_priv = dev->dev_private;
1898 enum intel_display_power_domain power_domain;
1899 u32 pp;
1900 u32 pp_stat_reg, pp_ctrl_reg;
1901 bool need_to_disable = !intel_dp->want_panel_vdd;
1903 lockdep_assert_held(&dev_priv->pps_mutex);
1905 if (!is_edp(intel_dp))
1906 return false;
1908 cancel_delayed_work(&intel_dp->panel_vdd_work);
1909 intel_dp->want_panel_vdd = true;
1911 if (edp_have_panel_vdd(intel_dp))
1912 return need_to_disable;
1914 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1915 intel_display_power_get(dev_priv, power_domain);
1917 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1918 port_name(intel_dig_port->port));
1920 if (!edp_have_panel_power(intel_dp))
1921 wait_panel_power_cycle(intel_dp);
1923 pp = ironlake_get_pp_control(intel_dp);
1924 pp |= EDP_FORCE_VDD;
1926 pp_stat_reg = _pp_stat_reg(intel_dp);
1927 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1929 I915_WRITE(pp_ctrl_reg, pp);
1930 POSTING_READ(pp_ctrl_reg);
1931 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1932 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1934 * If the panel wasn't on, delay before accessing aux channel
1936 if (!edp_have_panel_power(intel_dp)) {
1937 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1938 port_name(intel_dig_port->port));
1939 msleep(intel_dp->panel_power_up_delay);
1942 return need_to_disable;
1946 * Must be paired with intel_edp_panel_vdd_off() or
1947 * intel_edp_panel_off().
1948 * Nested calls to these functions are not allowed since
1949 * we drop the lock. Caller must use some higher level
1950 * locking to prevent nested calls from other threads.
1952 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1954 bool vdd;
1956 if (!is_edp(intel_dp))
1957 return;
1959 pps_lock(intel_dp);
1960 vdd = edp_panel_vdd_on(intel_dp);
1961 pps_unlock(intel_dp);
1963 #ifdef __DragonFly__
1964 /* XXX: limit dmesg spam to 16 warnings instead of 137, where is the bug? */
1965 if (!vdd)
1966 DRM_ERROR_RATELIMITED("eDP port %c VDD already requested on\n",
1967 port_name(dp_to_dig_port(intel_dp)->port));
1968 #else
1969 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1970 port_name(dp_to_dig_port(intel_dp)->port));
1971 #endif
1974 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1976 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1977 struct drm_i915_private *dev_priv = dev->dev_private;
1978 struct intel_digital_port *intel_dig_port =
1979 dp_to_dig_port(intel_dp);
1980 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1981 enum intel_display_power_domain power_domain;
1982 u32 pp;
1983 u32 pp_stat_reg, pp_ctrl_reg;
1985 lockdep_assert_held(&dev_priv->pps_mutex);
1987 WARN_ON(intel_dp->want_panel_vdd);
1989 if (!edp_have_panel_vdd(intel_dp))
1990 return;
1992 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1993 port_name(intel_dig_port->port));
1995 pp = ironlake_get_pp_control(intel_dp);
1996 pp &= ~EDP_FORCE_VDD;
1998 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1999 pp_stat_reg = _pp_stat_reg(intel_dp);
2001 I915_WRITE(pp_ctrl_reg, pp);
2002 POSTING_READ(pp_ctrl_reg);
2004 /* Make sure sequencer is idle before allowing subsequent activity */
2005 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2006 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2008 if ((pp & POWER_TARGET_ON) == 0)
2009 intel_dp->last_power_cycle = jiffies;
2011 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2012 intel_display_power_put(dev_priv, power_domain);
2015 static void edp_panel_vdd_work(struct work_struct *__work)
2017 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2018 struct intel_dp, panel_vdd_work);
2020 pps_lock(intel_dp);
2021 if (!intel_dp->want_panel_vdd)
2022 edp_panel_vdd_off_sync(intel_dp);
2023 pps_unlock(intel_dp);
2026 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2028 unsigned long delay;
2030 /*
2031 * Queue the timer to fire a long time from now (relative to the power
2032 * down delay) to keep the panel power up across a sequence of
2033 * operations.
2034 */
2035 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2036 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
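/*
 * Illustrative example: with a panel_power_cycle_delay of 500 ms the
 * delayed work fires after 2500 ms, so VDD stays forced on across any
 * burst of AUX traffic shorter than that.
 */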
2039 /*
2040 * Must be paired with edp_panel_vdd_on().
2041 * Must hold pps_mutex around the whole on/off sequence.
2042 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2043 */
2044 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2046 struct drm_i915_private *dev_priv =
2047 intel_dp_to_dev(intel_dp)->dev_private;
2049 lockdep_assert_held(&dev_priv->pps_mutex);
2051 if (!is_edp(intel_dp))
2052 return;
2054 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2055 port_name(dp_to_dig_port(intel_dp)->port));
2057 intel_dp->want_panel_vdd = false;
2059 if (sync)
2060 edp_panel_vdd_off_sync(intel_dp);
2061 else
2062 edp_panel_vdd_schedule_off(intel_dp);
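/*
 * With sync == true the VDD force is dropped immediately; otherwise
 * the delayed work scheduled above drops it later, provided
 * want_panel_vdd has stayed false in the meantime.
 */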
2065 static void edp_panel_on(struct intel_dp *intel_dp)
2067 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2068 struct drm_i915_private *dev_priv = dev->dev_private;
2069 u32 pp;
2070 u32 pp_ctrl_reg;
2072 lockdep_assert_held(&dev_priv->pps_mutex);
2074 if (!is_edp(intel_dp))
2075 return;
2077 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2078 port_name(dp_to_dig_port(intel_dp)->port));
2080 if (WARN(edp_have_panel_power(intel_dp),
2081 "eDP port %c panel power already on\n",
2082 port_name(dp_to_dig_port(intel_dp)->port)))
2083 return;
2085 wait_panel_power_cycle(intel_dp);
2087 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2088 pp = ironlake_get_pp_control(intel_dp);
2089 if (IS_GEN5(dev)) {
2090 /* ILK workaround: disable reset around power sequence */
2091 pp &= ~PANEL_POWER_RESET;
2092 I915_WRITE(pp_ctrl_reg, pp);
2093 POSTING_READ(pp_ctrl_reg);
2096 pp |= POWER_TARGET_ON;
2097 if (!IS_GEN5(dev))
2098 pp |= PANEL_POWER_RESET;
2100 I915_WRITE(pp_ctrl_reg, pp);
2101 POSTING_READ(pp_ctrl_reg);
2103 wait_panel_on(intel_dp);
2104 intel_dp->last_power_on = jiffies;
2106 if (IS_GEN5(dev)) {
2107 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2108 I915_WRITE(pp_ctrl_reg, pp);
2109 POSTING_READ(pp_ctrl_reg);
2113 void intel_edp_panel_on(struct intel_dp *intel_dp)
2115 if (!is_edp(intel_dp))
2116 return;
2118 pps_lock(intel_dp);
2119 edp_panel_on(intel_dp);
2120 pps_unlock(intel_dp);
2124 static void edp_panel_off(struct intel_dp *intel_dp)
2126 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2127 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2128 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2129 struct drm_i915_private *dev_priv = dev->dev_private;
2130 enum intel_display_power_domain power_domain;
2131 u32 pp;
2132 u32 pp_ctrl_reg;
2134 lockdep_assert_held(&dev_priv->pps_mutex);
2136 if (!is_edp(intel_dp))
2137 return;
2139 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2140 port_name(dp_to_dig_port(intel_dp)->port));
2142 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2143 port_name(dp_to_dig_port(intel_dp)->port));
2145 pp = ironlake_get_pp_control(intel_dp);
2146 /* We need to switch off panel power _and_ force vdd, for otherwise some
2147 * panels get very unhappy and cease to work. */
2148 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2149 EDP_BLC_ENABLE);
2151 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2153 intel_dp->want_panel_vdd = false;
2155 I915_WRITE(pp_ctrl_reg, pp);
2156 POSTING_READ(pp_ctrl_reg);
2158 intel_dp->last_power_cycle = jiffies;
2159 wait_panel_off(intel_dp);
2161 /* We got a reference when we enabled the VDD. */
2162 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2163 intel_display_power_put(dev_priv, power_domain);
2166 void intel_edp_panel_off(struct intel_dp *intel_dp)
2168 if (!is_edp(intel_dp))
2169 return;
2171 pps_lock(intel_dp);
2172 edp_panel_off(intel_dp);
2173 pps_unlock(intel_dp);
2176 /* Enable backlight in the panel power control. */
2177 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2179 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2180 struct drm_device *dev = intel_dig_port->base.base.dev;
2181 struct drm_i915_private *dev_priv = dev->dev_private;
2182 u32 pp;
2183 u32 pp_ctrl_reg;
2185 /*
2186 * If we enable the backlight right away following a panel power
2187 * on, we may see slight flicker as the panel syncs with the eDP
2188 * link. So delay a bit to make sure the image is solid before
2189 * allowing it to appear.
2190 */
2191 wait_backlight_on(intel_dp);
2193 pps_lock(intel_dp);
2195 pp = ironlake_get_pp_control(intel_dp);
2196 pp |= EDP_BLC_ENABLE;
2198 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2200 I915_WRITE(pp_ctrl_reg, pp);
2201 POSTING_READ(pp_ctrl_reg);
2203 pps_unlock(intel_dp);
2206 /* Enable backlight PWM and backlight PP control. */
2207 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2209 if (!is_edp(intel_dp))
2210 return;
2212 DRM_DEBUG_KMS("\n");
2214 intel_panel_enable_backlight(intel_dp->attached_connector);
2215 _intel_edp_backlight_on(intel_dp);
2218 /* Disable backlight in the panel power control. */
2219 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2221 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2222 struct drm_i915_private *dev_priv = dev->dev_private;
2223 u32 pp;
2224 u32 pp_ctrl_reg;
2226 if (!is_edp(intel_dp))
2227 return;
2229 pps_lock(intel_dp);
2231 pp = ironlake_get_pp_control(intel_dp);
2232 pp &= ~EDP_BLC_ENABLE;
2234 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2236 I915_WRITE(pp_ctrl_reg, pp);
2237 POSTING_READ(pp_ctrl_reg);
2239 pps_unlock(intel_dp);
2241 intel_dp->last_backlight_off = jiffies;
2242 edp_wait_backlight_off(intel_dp);
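/*
 * Since last_backlight_off was set just before the wait above, the
 * wait runs for the full backlight_off_delay, so this function only
 * returns once the panel is ready for the next power transition.
 */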
2245 /* Disable backlight PP control and backlight PWM. */
2246 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2248 if (!is_edp(intel_dp))
2249 return;
2251 DRM_DEBUG_KMS("\n");
2253 _intel_edp_backlight_off(intel_dp);
2254 intel_panel_disable_backlight(intel_dp->attached_connector);
2257 /*
2258 * Hook for controlling the panel power control backlight through the bl_power
2259 * sysfs attribute. Take care to handle multiple calls.
2260 */
2261 static void intel_edp_backlight_power(struct intel_connector *connector,
2262 bool enable)
2264 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2265 bool is_enabled;
2267 pps_lock(intel_dp);
2268 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2269 pps_unlock(intel_dp);
2271 if (is_enabled == enable)
2272 return;
2274 DRM_DEBUG_KMS("panel power control backlight %s\n",
2275 enable ? "enable" : "disable");
2277 if (enable)
2278 _intel_edp_backlight_on(intel_dp);
2279 else
2280 _intel_edp_backlight_off(intel_dp);
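/*
 * The is_enabled check above makes this hook idempotent, so repeated
 * writes of the same value to bl_power are harmless.
 */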
2283 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2285 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2286 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2287 struct drm_device *dev = crtc->dev;
2288 struct drm_i915_private *dev_priv = dev->dev_private;
2289 u32 dpa_ctl;
2291 assert_pipe_disabled(dev_priv,
2292 to_intel_crtc(crtc)->pipe);
2294 DRM_DEBUG_KMS("\n");
2295 dpa_ctl = I915_READ(DP_A);
2296 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2297 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2299 /* We don't adjust intel_dp->DP while tearing down the link, to
2300 * facilitate link retraining (e.g. after hotplug). Hence clear all
2301 * enable bits here to ensure that we don't enable too much. */
2302 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2303 intel_dp->DP |= DP_PLL_ENABLE;
2304 I915_WRITE(DP_A, intel_dp->DP);
2305 POSTING_READ(DP_A);
2306 udelay(200);
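/*
 * The fixed 200 us delay presumably gives the eDP PLL time to lock
 * before the port gets enabled; ironlake_edp_pll_off() below uses the
 * same settling delay on the way down.
 */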
2309 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2311 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2312 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2313 struct drm_device *dev = crtc->dev;
2314 struct drm_i915_private *dev_priv = dev->dev_private;
2315 u32 dpa_ctl;
2317 assert_pipe_disabled(dev_priv,
2318 to_intel_crtc(crtc)->pipe);
2320 dpa_ctl = I915_READ(DP_A);
2321 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2322 "dp pll off, should be on\n");
2323 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2325 /* We can't rely on the value tracked for the DP register in
2326 * intel_dp->DP because link_down must not change that (otherwise link
2327 * re-training will fail). */
2328 dpa_ctl &= ~DP_PLL_ENABLE;
2329 I915_WRITE(DP_A, dpa_ctl);
2330 POSTING_READ(DP_A);
2331 udelay(200);
2334 /* If the sink supports it, try to set the power state appropriately */
2335 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2337 int ret, i;
2339 /* Should have a valid DPCD by this point */
2340 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2341 return;
2343 if (mode != DRM_MODE_DPMS_ON) {
2344 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2345 DP_SET_POWER_D3);
2346 } else {
2347 /*
2348 * When turning on, we need to retry for 1ms to give the sink
2349 * time to wake up.
2350 */
2351 for (i = 0; i < 3; i++) {
2352 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2353 DP_SET_POWER_D0);
2354 if (ret == 1)
2355 break;
2356 msleep(1);
2360 if (ret != 1)
2361 DRM_DEBUG_KMS("failed to %s sink power state\n",
2362 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
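/*
 * DPCD revisions below 1.1 have no DP_SET_POWER register, which is
 * why the function bails out early for them instead of attempting
 * the write.
 */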
2365 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2366 enum i915_pipe *pipe)
2368 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2369 enum port port = dp_to_dig_port(intel_dp)->port;
2370 struct drm_device *dev = encoder->base.dev;
2371 struct drm_i915_private *dev_priv = dev->dev_private;
2372 enum intel_display_power_domain power_domain;
2373 u32 tmp;
2375 power_domain = intel_display_port_power_domain(encoder);
2376 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2377 return false;
2379 tmp = I915_READ(intel_dp->output_reg);
2381 if (!(tmp & DP_PORT_EN))
2382 return false;
2384 if (IS_GEN7(dev) && port == PORT_A) {
2385 *pipe = PORT_TO_PIPE_CPT(tmp);
2386 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2387 enum i915_pipe p;
2389 for_each_pipe(dev_priv, p) {
2390 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2391 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2392 *pipe = p;
2393 return true;
2397 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2398 intel_dp->output_reg);
2399 } else if (IS_CHERRYVIEW(dev)) {
2400 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2401 } else {
2402 *pipe = PORT_TO_PIPE(tmp);
2405 return true;
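/*
 * Note the CPT case above: there the pipe-to-port routing lives in
 * the per-pipe TRANS_DP_CTL registers, so the code walks all pipes
 * looking for the one driving this port instead of decoding the port
 * register itself.
 */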
2408 static void intel_dp_get_config(struct intel_encoder *encoder,
2409 struct intel_crtc_state *pipe_config)
2411 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2412 u32 tmp, flags = 0;
2413 struct drm_device *dev = encoder->base.dev;
2414 struct drm_i915_private *dev_priv = dev->dev_private;
2415 enum port port = dp_to_dig_port(intel_dp)->port;
2416 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2417 int dotclock;
2419 tmp = I915_READ(intel_dp->output_reg);
2421 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2423 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2424 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2426 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2427 flags |= DRM_MODE_FLAG_PHSYNC;
2428 else
2429 flags |= DRM_MODE_FLAG_NHSYNC;
2431 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2432 flags |= DRM_MODE_FLAG_PVSYNC;
2433 else
2434 flags |= DRM_MODE_FLAG_NVSYNC;
2435 } else {
2436 if (tmp & DP_SYNC_HS_HIGH)
2437 flags |= DRM_MODE_FLAG_PHSYNC;
2438 else
2439 flags |= DRM_MODE_FLAG_NHSYNC;
2441 if (tmp & DP_SYNC_VS_HIGH)
2442 flags |= DRM_MODE_FLAG_PVSYNC;
2443 else
2444 flags |= DRM_MODE_FLAG_NVSYNC;
2447 pipe_config->base.adjusted_mode.flags |= flags;
2449 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2450 tmp & DP_COLOR_RANGE_16_235)
2451 pipe_config->limited_color_range = true;
2453 pipe_config->has_dp_encoder = true;
2455 pipe_config->lane_count =
2456 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2458 intel_dp_get_m_n(crtc, pipe_config);
2460 if (port == PORT_A) {
2461 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2462 pipe_config->port_clock = 162000;
2463 else
2464 pipe_config->port_clock = 270000;
2467 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2468 &pipe_config->dp_m_n);
2470 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2471 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2473 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2475 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2476 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2477 /*
2478 * This is a big fat ugly hack.
2479 *
2480 * Some machines in UEFI boot mode provide us a VBT that has 18
2481 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2482 * unknown we fail to light up. Yet the same BIOS boots up with
2483 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2484 * max, not what it tells us to use.
2485 *
2486 * Note: This will still be broken if the eDP panel is not lit
2487 * up by the BIOS, and thus we can't get the mode at module
2488 * load.
2489 */
2490 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2491 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2492 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2496 static void intel_disable_dp(struct intel_encoder *encoder)
2498 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2499 struct drm_device *dev = encoder->base.dev;
2500 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2502 if (crtc->config->has_audio)
2503 intel_audio_codec_disable(encoder);
2505 if (HAS_PSR(dev) && !HAS_DDI(dev))
2506 intel_psr_disable(intel_dp);
2508 /* Make sure the panel is off before trying to change the mode. But also
2509 * ensure that we have vdd while we switch off the panel. */
2510 intel_edp_panel_vdd_on(intel_dp);
2511 intel_edp_backlight_off(intel_dp);
2512 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2513 intel_edp_panel_off(intel_dp);
2515 /* disable the port before the pipe on g4x */
2516 if (INTEL_INFO(dev)->gen < 5)
2517 intel_dp_link_down(intel_dp);
2520 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2522 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2523 enum port port = dp_to_dig_port(intel_dp)->port;
2525 intel_dp_link_down(intel_dp);
2526 if (port == PORT_A)
2527 ironlake_edp_pll_off(intel_dp);
2530 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2532 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2534 intel_dp_link_down(intel_dp);
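/*
 * Assert (reset == true) or deassert the data lane and clock soft
 * resets on the first PCS channel, and on the second channel as well
 * when more than two lanes are in use.
 */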
2537 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2538 bool reset)
2540 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2541 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2542 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2543 enum i915_pipe pipe = crtc->pipe;
2544 uint32_t val;
2546 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2547 if (reset)
2548 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2549 else
2550 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2551 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2553 if (crtc->config->lane_count > 2) {
2554 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2555 if (reset)
2556 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2557 else
2558 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2559 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2562 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2563 val |= CHV_PCS_REQ_SOFTRESET_EN;
2564 if (reset)
2565 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2566 else
2567 val |= DPIO_PCS_CLK_SOFT_RESET;
2568 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2570 if (crtc->config->lane_count > 2) {
2571 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2572 val |= CHV_PCS_REQ_SOFTRESET_EN;
2573 if (reset)
2574 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2575 else
2576 val |= DPIO_PCS_CLK_SOFT_RESET;
2577 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2581 static void chv_post_disable_dp(struct intel_encoder *encoder)
2583 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2584 struct drm_device *dev = encoder->base.dev;
2585 struct drm_i915_private *dev_priv = dev->dev_private;
2587 intel_dp_link_down(intel_dp);
2589 mutex_lock(&dev_priv->sb_lock);
2591 /* Assert data lane reset */
2592 chv_data_lane_soft_reset(encoder, true);
2594 mutex_unlock(&dev_priv->sb_lock);
2597 static void
2598 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2599 uint32_t *DP,
2600 uint8_t dp_train_pat)
2602 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2603 struct drm_device *dev = intel_dig_port->base.base.dev;
2604 struct drm_i915_private *dev_priv = dev->dev_private;
2605 enum port port = intel_dig_port->port;
2607 if (HAS_DDI(dev)) {
2608 uint32_t temp = I915_READ(DP_TP_CTL(port));
2610 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2611 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2612 else
2613 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2615 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2616 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2617 case DP_TRAINING_PATTERN_DISABLE:
2618 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2620 break;
2621 case DP_TRAINING_PATTERN_1:
2622 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2623 break;
2624 case DP_TRAINING_PATTERN_2:
2625 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2626 break;
2627 case DP_TRAINING_PATTERN_3:
2628 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2629 break;
2631 I915_WRITE(DP_TP_CTL(port), temp);
2633 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2634 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2635 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2637 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2638 case DP_TRAINING_PATTERN_DISABLE:
2639 *DP |= DP_LINK_TRAIN_OFF_CPT;
2640 break;
2641 case DP_TRAINING_PATTERN_1:
2642 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2643 break;
2644 case DP_TRAINING_PATTERN_2:
2645 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2646 break;
2647 case DP_TRAINING_PATTERN_3:
2648 DRM_ERROR("DP training pattern 3 not supported\n");
2649 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2650 break;
2653 } else {
2654 if (IS_CHERRYVIEW(dev))
2655 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2656 else
2657 *DP &= ~DP_LINK_TRAIN_MASK;
2659 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2660 case DP_TRAINING_PATTERN_DISABLE:
2661 *DP |= DP_LINK_TRAIN_OFF;
2662 break;
2663 case DP_TRAINING_PATTERN_1:
2664 *DP |= DP_LINK_TRAIN_PAT_1;
2665 break;
2666 case DP_TRAINING_PATTERN_2:
2667 *DP |= DP_LINK_TRAIN_PAT_2;
2668 break;
2669 case DP_TRAINING_PATTERN_3:
2670 if (IS_CHERRYVIEW(dev)) {
2671 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2672 } else {
2673 DRM_ERROR("DP training pattern 3 not supported\n");
2674 *DP |= DP_LINK_TRAIN_PAT_2;
2676 break;
2681 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2683 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2684 struct drm_i915_private *dev_priv = dev->dev_private;
2686 /* enable with pattern 1 (as per spec) */
2687 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2688 DP_TRAINING_PATTERN_1);
2690 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2691 POSTING_READ(intel_dp->output_reg);
2693 /*
2694 * Magic for VLV/CHV. We _must_ first set up the register
2695 * without actually enabling the port, and then do another
2696 * write to enable the port. Otherwise link training will
2697 * fail when the power sequencer is freshly used for this port.
2698 */
2699 intel_dp->DP |= DP_PORT_EN;
2701 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2702 POSTING_READ(intel_dp->output_reg);
2705 static void intel_enable_dp(struct intel_encoder *encoder)
2707 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2708 struct drm_device *dev = encoder->base.dev;
2709 struct drm_i915_private *dev_priv = dev->dev_private;
2710 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2711 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2713 if (WARN_ON(dp_reg & DP_PORT_EN))
2714 return;
2716 pps_lock(intel_dp);
2718 if (IS_VALLEYVIEW(dev))
2719 vlv_init_panel_power_sequencer(intel_dp);
2721 intel_dp_enable_port(intel_dp);
2723 edp_panel_vdd_on(intel_dp);
2724 edp_panel_on(intel_dp);
2725 edp_panel_vdd_off(intel_dp, true);
2727 pps_unlock(intel_dp);
2729 if (IS_VALLEYVIEW(dev)) {
2730 unsigned int lane_mask = 0x0;
2732 if (IS_CHERRYVIEW(dev))
2733 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2735 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2736 lane_mask);
2739 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2740 intel_dp_start_link_train(intel_dp);
2741 intel_dp_stop_link_train(intel_dp);
2743 if (crtc->config->has_audio) {
2744 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2745 pipe_name(crtc->pipe));
2746 intel_audio_codec_enable(encoder);
2750 static void g4x_enable_dp(struct intel_encoder *encoder)
2752 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2754 intel_enable_dp(encoder);
2755 intel_edp_backlight_on(intel_dp);
2758 static void vlv_enable_dp(struct intel_encoder *encoder)
2760 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2762 intel_edp_backlight_on(intel_dp);
2763 intel_psr_enable(intel_dp);
2766 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2768 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2769 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2771 intel_dp_prepare(encoder);
2773 /* Only ilk+ has port A */
2774 if (dport->port == PORT_A) {
2775 ironlake_set_pll_cpu_edp(intel_dp);
2776 ironlake_edp_pll_on(intel_dp);
2780 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2782 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2783 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2784 enum i915_pipe pipe = intel_dp->pps_pipe;
2785 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2787 edp_panel_vdd_off_sync(intel_dp);
2789 /*
2790 * VLV seems to get confused when multiple power sequencers
2791 * have the same port selected (even if only one has power/vdd
2792 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2793 * CHV on the other hand doesn't seem to mind having the same port
2794 * selected in multiple power sequencers, but let's clear the
2795 * port select always when logically disconnecting a power sequencer
2796 * from a port.
2797 */
2798 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2799 pipe_name(pipe), port_name(intel_dig_port->port));
2800 I915_WRITE(pp_on_reg, 0);
2801 POSTING_READ(pp_on_reg);
2803 intel_dp->pps_pipe = INVALID_PIPE;
2806 static void vlv_steal_power_sequencer(struct drm_device *dev,
2807 enum i915_pipe pipe)
2809 struct drm_i915_private *dev_priv = dev->dev_private;
2810 struct intel_encoder *encoder;
2812 lockdep_assert_held(&dev_priv->pps_mutex);
2814 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2815 return;
2817 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2818 base.head) {
2819 struct intel_dp *intel_dp;
2820 enum port port;
2822 if (encoder->type != INTEL_OUTPUT_EDP)
2823 continue;
2825 intel_dp = enc_to_intel_dp(&encoder->base);
2826 port = dp_to_dig_port(intel_dp)->port;
2828 if (intel_dp->pps_pipe != pipe)
2829 continue;
2831 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2832 pipe_name(pipe), port_name(port));
2834 WARN(encoder->base.crtc,
2835 "stealing pipe %c power sequencer from active eDP port %c\n",
2836 pipe_name(pipe), port_name(port));
2838 /* make sure vdd is off before we steal it */
2839 vlv_detach_power_sequencer(intel_dp);
2843 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2845 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2846 struct intel_encoder *encoder = &intel_dig_port->base;
2847 struct drm_device *dev = encoder->base.dev;
2848 struct drm_i915_private *dev_priv = dev->dev_private;
2849 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2851 lockdep_assert_held(&dev_priv->pps_mutex);
2853 if (!is_edp(intel_dp))
2854 return;
2856 if (intel_dp->pps_pipe == crtc->pipe)
2857 return;
2859 /*
2860 * If another power sequencer was being used on this
2861 * port previously make sure to turn off vdd there while
2862 * we still have control of it.
2863 */
2864 if (intel_dp->pps_pipe != INVALID_PIPE)
2865 vlv_detach_power_sequencer(intel_dp);
2867 /*
2868 * We may be stealing the power
2869 * sequencer from another port.
2870 */
2871 vlv_steal_power_sequencer(dev, crtc->pipe);
2873 /* now it's all ours */
2874 intel_dp->pps_pipe = crtc->pipe;
2876 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2877 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2879 /* init power sequencer on this pipe and port */
2880 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2881 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2884 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2886 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2887 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2888 struct drm_device *dev = encoder->base.dev;
2889 struct drm_i915_private *dev_priv = dev->dev_private;
2890 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2891 enum dpio_channel port = vlv_dport_to_channel(dport);
2892 int pipe = intel_crtc->pipe;
2893 u32 val;
2895 mutex_lock(&dev_priv->sb_lock);
2897 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2898 val = 0;
2899 if (pipe)
2900 val |= (1<<21);
2901 else
2902 val &= ~(1<<21);
2903 val |= 0x001000c4;
2904 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2905 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2906 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2908 mutex_unlock(&dev_priv->sb_lock);
2910 intel_enable_dp(encoder);
2913 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2915 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2916 struct drm_device *dev = encoder->base.dev;
2917 struct drm_i915_private *dev_priv = dev->dev_private;
2918 struct intel_crtc *intel_crtc =
2919 to_intel_crtc(encoder->base.crtc);
2920 enum dpio_channel port = vlv_dport_to_channel(dport);
2921 int pipe = intel_crtc->pipe;
2923 intel_dp_prepare(encoder);
2925 /* Program Tx lane resets to default */
2926 mutex_lock(&dev_priv->sb_lock);
2927 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2928 DPIO_PCS_TX_LANE2_RESET |
2929 DPIO_PCS_TX_LANE1_RESET);
2930 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2931 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2932 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2933 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2934 DPIO_PCS_CLK_SOFT_RESET);
2936 /* Fix up inter-pair skew failure */
2937 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2938 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2939 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2940 mutex_unlock(&dev_priv->sb_lock);
2943 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2945 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2946 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2947 struct drm_device *dev = encoder->base.dev;
2948 struct drm_i915_private *dev_priv = dev->dev_private;
2949 struct intel_crtc *intel_crtc =
2950 to_intel_crtc(encoder->base.crtc);
2951 enum dpio_channel ch = vlv_dport_to_channel(dport);
2952 int pipe = intel_crtc->pipe;
2953 int data, i, stagger;
2954 u32 val;
2956 mutex_lock(&dev_priv->sb_lock);
2958 /* allow hardware to manage TX FIFO reset source */
2959 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2960 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2961 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2963 if (intel_crtc->config->lane_count > 2) {
2964 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2965 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2966 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2969 /* Program the optimal Tx lane latency setting */
2970 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2971 /* Set the upar bit */
2972 if (intel_crtc->config->lane_count == 1)
2973 data = 0x0;
2974 else
2975 data = (i == 1) ? 0x0 : 0x1;
2976 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2977 data << DPIO_UPAR_SHIFT);
2980 /* Data lane stagger programming */
2981 if (intel_crtc->config->port_clock > 270000)
2982 stagger = 0x18;
2983 else if (intel_crtc->config->port_clock > 135000)
2984 stagger = 0xd;
2985 else if (intel_crtc->config->port_clock > 67500)
2986 stagger = 0x7;
2987 else if (intel_crtc->config->port_clock > 33750)
2988 stagger = 0x4;
2989 else
2990 stagger = 0x2;
2992 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2993 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2994 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2996 if (intel_crtc->config->lane_count > 2) {
2997 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2998 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2999 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3002 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3003 DPIO_LANESTAGGER_STRAP(stagger) |
3004 DPIO_LANESTAGGER_STRAP_OVRD |
3005 DPIO_TX1_STAGGER_MASK(0x1f) |
3006 DPIO_TX1_STAGGER_MULT(6) |
3007 DPIO_TX2_STAGGER_MULT(0));
3009 if (intel_crtc->config->lane_count > 2) {
3010 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3011 DPIO_LANESTAGGER_STRAP(stagger) |
3012 DPIO_LANESTAGGER_STRAP_OVRD |
3013 DPIO_TX1_STAGGER_MASK(0x1f) |
3014 DPIO_TX1_STAGGER_MULT(7) |
3015 DPIO_TX2_STAGGER_MULT(5));
3018 /* Deassert data lane reset */
3019 chv_data_lane_soft_reset(encoder, false);
3021 mutex_unlock(&dev_priv->sb_lock);
3023 intel_enable_dp(encoder);
3025 /* Second common lane will stay alive on its own now */
3026 if (dport->release_cl2_override) {
3027 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3028 dport->release_cl2_override = false;
3032 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3034 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3035 struct drm_device *dev = encoder->base.dev;
3036 struct drm_i915_private *dev_priv = dev->dev_private;
3037 struct intel_crtc *intel_crtc =
3038 to_intel_crtc(encoder->base.crtc);
3039 enum dpio_channel ch = vlv_dport_to_channel(dport);
3040 enum i915_pipe pipe = intel_crtc->pipe;
3041 unsigned int lane_mask =
3042 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
3043 u32 val;
3045 intel_dp_prepare(encoder);
3047 /*
3048 * Must trick the second common lane into life.
3049 * Otherwise we can't even access the PLL.
3050 */
3051 if (ch == DPIO_CH0 && pipe == PIPE_B)
3052 dport->release_cl2_override =
3053 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3055 chv_phy_powergate_lanes(encoder, true, lane_mask);
3057 mutex_lock(&dev_priv->sb_lock);
3059 /* Assert data lane reset */
3060 chv_data_lane_soft_reset(encoder, true);
3062 /* program left/right clock distribution */
3063 if (pipe != PIPE_B) {
3064 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3065 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3066 if (ch == DPIO_CH0)
3067 val |= CHV_BUFLEFTENA1_FORCE;
3068 if (ch == DPIO_CH1)
3069 val |= CHV_BUFRIGHTENA1_FORCE;
3070 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3071 } else {
3072 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3073 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3074 if (ch == DPIO_CH0)
3075 val |= CHV_BUFLEFTENA2_FORCE;
3076 if (ch == DPIO_CH1)
3077 val |= CHV_BUFRIGHTENA2_FORCE;
3078 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3081 /* program clock channel usage */
3082 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3083 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3084 if (pipe != PIPE_B)
3085 val &= ~CHV_PCS_USEDCLKCHANNEL;
3086 else
3087 val |= CHV_PCS_USEDCLKCHANNEL;
3088 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3090 if (intel_crtc->config->lane_count > 2) {
3091 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3092 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3093 if (pipe != PIPE_B)
3094 val &= ~CHV_PCS_USEDCLKCHANNEL;
3095 else
3096 val |= CHV_PCS_USEDCLKCHANNEL;
3097 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3100 /*
3101 * This is a bit weird since generally CL
3102 * matches the pipe, but here we need to
3103 * pick the CL based on the port.
3104 */
3105 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3106 if (pipe != PIPE_B)
3107 val &= ~CHV_CMN_USEDCLKCHANNEL;
3108 else
3109 val |= CHV_CMN_USEDCLKCHANNEL;
3110 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3112 mutex_unlock(&dev_priv->sb_lock);
3115 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3117 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3118 enum i915_pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3119 u32 val;
3121 mutex_lock(&dev_priv->sb_lock);
3123 /* disable left/right clock distribution */
3124 if (pipe != PIPE_B) {
3125 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3126 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3127 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3128 } else {
3129 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3130 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3131 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3134 mutex_unlock(&dev_priv->sb_lock);
3136 /*
3137 * Leave the power down bit cleared for at least one
3138 * lane so that chv_phy_powergate_ch() will power
3139 * on something when the channel is otherwise unused.
3140 * When the port is off and the override is removed
3141 * the lanes power down anyway, so otherwise it doesn't
3142 * really matter what the state of power down bits is
3143 * after this.
3144 */
3145 chv_phy_powergate_lanes(encoder, false, 0x0);
3148 /*
3149 * Native read with retry for link status and receiver capability reads for
3150 * cases where the sink may still be asleep.
3151 *
3152 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3153 * supposed to retry 3 times per the spec.
3154 */
3155 static ssize_t
3156 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3157 void *buffer, size_t size)
3159 ssize_t ret;
3160 int i;
3162 /*
3163 * Sometimes we just get the same incorrect byte repeated
3164 * over the entire buffer. Doing just one throw-away read
3165 * initially seems to "solve" it.
3166 */
3167 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3169 for (i = 0; i < 3; i++) {
3170 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3171 if (ret == size)
3172 return ret;
3173 msleep(1);
3176 return ret;
3179 /*
3180 * Fetch AUX CH registers 0x202 - 0x207 which contain
3181 * link status information
3182 */
3183 static bool
3184 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3186 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3187 DP_LANE0_1_STATUS,
3188 link_status,
3189 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3192 /* These are source-specific values. */
3193 static uint8_t
3194 intel_dp_voltage_max(struct intel_dp *intel_dp)
3196 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3197 struct drm_i915_private *dev_priv = dev->dev_private;
3198 enum port port = dp_to_dig_port(intel_dp)->port;
3200 if (IS_BROXTON(dev))
3201 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3202 else if (INTEL_INFO(dev)->gen >= 9) {
3203 if (dev_priv->edp_low_vswing && port == PORT_A)
3204 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3205 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3206 } else if (IS_VALLEYVIEW(dev))
3207 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3208 else if (IS_GEN7(dev) && port == PORT_A)
3209 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3210 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3211 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3212 else
3213 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3216 static uint8_t
3217 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3219 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3220 enum port port = dp_to_dig_port(intel_dp)->port;
3222 if (INTEL_INFO(dev)->gen >= 9) {
3223 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3224 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3225 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3227 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3228 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3229 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3231 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3232 default:
3233 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3235 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3236 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3237 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3238 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3239 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3240 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3241 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3242 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3243 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3244 default:
3245 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3247 } else if (IS_VALLEYVIEW(dev)) {
3248 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3250 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3251 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3252 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3254 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3256 default:
3257 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3259 } else if (IS_GEN7(dev) && port == PORT_A) {
3260 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3261 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3262 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3263 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3265 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3266 default:
3267 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3269 } else {
3270 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3272 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3273 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3274 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3276 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3278 default:
3279 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3284 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3286 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3287 struct drm_i915_private *dev_priv = dev->dev_private;
3288 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3289 struct intel_crtc *intel_crtc =
3290 to_intel_crtc(dport->base.base.crtc);
3291 unsigned long demph_reg_value, preemph_reg_value,
3292 uniqtranscale_reg_value;
3293 uint8_t train_set = intel_dp->train_set[0];
3294 enum dpio_channel port = vlv_dport_to_channel(dport);
3295 int pipe = intel_crtc->pipe;
3297 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3298 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3299 preemph_reg_value = 0x0004000;
3300 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3302 demph_reg_value = 0x2B405555;
3303 uniqtranscale_reg_value = 0x552AB83A;
3304 break;
3305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3306 demph_reg_value = 0x2B404040;
3307 uniqtranscale_reg_value = 0x5548B83A;
3308 break;
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3310 demph_reg_value = 0x2B245555;
3311 uniqtranscale_reg_value = 0x5560B83A;
3312 break;
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3314 demph_reg_value = 0x2B405555;
3315 uniqtranscale_reg_value = 0x5598DA3A;
3316 break;
3317 default:
3318 return 0;
3320 break;
3321 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3322 preemph_reg_value = 0x0002000;
3323 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3325 demph_reg_value = 0x2B404040;
3326 uniqtranscale_reg_value = 0x5552B83A;
3327 break;
3328 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3329 demph_reg_value = 0x2B404848;
3330 uniqtranscale_reg_value = 0x5580B83A;
3331 break;
3332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3333 demph_reg_value = 0x2B404040;
3334 uniqtranscale_reg_value = 0x55ADDA3A;
3335 break;
3336 default:
3337 return 0;
3339 break;
3340 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3341 preemph_reg_value = 0x0000000;
3342 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3344 demph_reg_value = 0x2B305555;
3345 uniqtranscale_reg_value = 0x5570B83A;
3346 break;
3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3348 demph_reg_value = 0x2B2B4040;
3349 uniqtranscale_reg_value = 0x55ADDA3A;
3350 break;
3351 default:
3352 return 0;
3354 break;
3355 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3356 preemph_reg_value = 0x0006000;
3357 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3358 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3359 demph_reg_value = 0x1B405555;
3360 uniqtranscale_reg_value = 0x55ADDA3A;
3361 break;
3362 default:
3363 return 0;
3365 break;
3366 default:
3367 return 0;
3370 mutex_lock(&dev_priv->sb_lock);
3371 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3372 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3373 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3374 uniqtranscale_reg_value);
3375 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3376 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3377 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3378 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3379 mutex_unlock(&dev_priv->sb_lock);
3381 return 0;
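/*
 * CHV only needs the unique transition scale for the one remaining
 * combination of maximum voltage swing with no pre-emphasis.
 */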
3384 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3386 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3387 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3390 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3392 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3393 struct drm_i915_private *dev_priv = dev->dev_private;
3394 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3395 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3396 u32 deemph_reg_value, margin_reg_value, val;
3397 uint8_t train_set = intel_dp->train_set[0];
3398 enum dpio_channel ch = vlv_dport_to_channel(dport);
3399 enum i915_pipe pipe = intel_crtc->pipe;
3400 int i;
3402 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3403 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3404 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3405 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3406 deemph_reg_value = 128;
3407 margin_reg_value = 52;
3408 break;
3409 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3410 deemph_reg_value = 128;
3411 margin_reg_value = 77;
3412 break;
3413 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3414 deemph_reg_value = 128;
3415 margin_reg_value = 102;
3416 break;
3417 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3418 deemph_reg_value = 128;
3419 margin_reg_value = 154;
3420 /* FIXME extra to set for 1200 */
3421 break;
3422 default:
3423 return 0;
3425 break;
3426 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3427 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3428 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3429 deemph_reg_value = 85;
3430 margin_reg_value = 78;
3431 break;
3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3433 deemph_reg_value = 85;
3434 margin_reg_value = 116;
3435 break;
3436 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3437 deemph_reg_value = 85;
3438 margin_reg_value = 154;
3439 break;
3440 default:
3441 return 0;
3443 break;
3444 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3445 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3446 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3447 deemph_reg_value = 64;
3448 margin_reg_value = 104;
3449 break;
3450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3451 deemph_reg_value = 64;
3452 margin_reg_value = 154;
3453 break;
3454 default:
3455 return 0;
3457 break;
3458 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3459 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3460 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3461 deemph_reg_value = 43;
3462 margin_reg_value = 154;
3463 break;
3464 default:
3465 return 0;
3467 break;
3468 default:
3469 return 0;
3472 mutex_lock(&dev_priv->sb_lock);
3474 /* Clear calc init */
3475 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3476 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3477 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3478 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3479 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3481 if (intel_crtc->config->lane_count > 2) {
3482 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3483 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3484 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3485 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3486 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3489 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3490 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3491 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3492 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3494 if (intel_crtc->config->lane_count > 2) {
3495 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3496 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3497 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3498 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3501 /* Program swing deemph */
3502 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3503 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3504 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3505 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3506 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3509 /* Program swing margin */
3510 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3511 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3513 val &= ~DPIO_SWING_MARGIN000_MASK;
3514 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3516 /*
3517 * Supposedly this value shouldn't matter when unique transition
3518 * scale is disabled, but in fact it does matter. Let's just
3519 * always program the same value and hope it's OK.
3520 */
3521 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3522 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3524 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3527 /*
3528 * The document said it needs to set bit 27 for ch0 and bit 26
3529 * for ch1. Might be a typo in the doc.
3530 * For now, for this unique transition scale selection, set bit
3531 * 27 for ch0 and ch1.
3532 */
3533 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3534 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3535 if (chv_need_uniq_trans_scale(train_set))
3536 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3537 else
3538 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3539 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3542 /* Start swing calculation */
3543 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3544 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3545 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3547 if (intel_crtc->config->lane_count > 2) {
3548 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3549 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3550 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3553 mutex_unlock(&dev_priv->sb_lock);
3555 return 0;
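/*
 * Scan the per-lane adjustment requests in the DPCD link status, take
 * the highest voltage swing and pre-emphasis requested on any lane,
 * clamp both to what this source can drive, and mirror the result
 * into every entry of train_set.
 */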
3558 static void
3559 intel_get_adjust_train(struct intel_dp *intel_dp,
3560 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3562 uint8_t v = 0;
3563 uint8_t p = 0;
3564 int lane;
3565 uint8_t voltage_max;
3566 uint8_t preemph_max;
3568 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3569 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3570 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3572 if (this_v > v)
3573 v = this_v;
3574 if (this_p > p)
3575 p = this_p;
3578 voltage_max = intel_dp_voltage_max(intel_dp);
3579 if (v >= voltage_max)
3580 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3582 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3583 if (p >= preemph_max)
3584 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3586 for (lane = 0; lane < 4; lane++)
3587 intel_dp->train_set[lane] = v | p;
3590 static uint32_t
3591 gen4_signal_levels(uint8_t train_set)
3593 uint32_t signal_levels = 0;
3595 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3596 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3597 default:
3598 signal_levels |= DP_VOLTAGE_0_4;
3599 break;
3600 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3601 signal_levels |= DP_VOLTAGE_0_6;
3602 break;
3603 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3604 signal_levels |= DP_VOLTAGE_0_8;
3605 break;
3606 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3607 signal_levels |= DP_VOLTAGE_1_2;
3608 break;
3610 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3611 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3612 default:
3613 signal_levels |= DP_PRE_EMPHASIS_0;
3614 break;
3615 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3616 signal_levels |= DP_PRE_EMPHASIS_3_5;
3617 break;
3618 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3619 signal_levels |= DP_PRE_EMPHASIS_6;
3620 break;
3621 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3622 signal_levels |= DP_PRE_EMPHASIS_9_5;
3623 break;
3625 return signal_levels;
3628 /* Gen6's DP voltage swing and pre-emphasis control */
3629 static uint32_t
3630 gen6_edp_signal_levels(uint8_t train_set)
3632 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3633 DP_TRAIN_PRE_EMPHASIS_MASK);
3634 switch (signal_levels) {
3635 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3636 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3637 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3638 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3639 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3640 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3642 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3643 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3645 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3646 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3648 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3649 default:
3650 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3651 "0x%x\n", signal_levels);
3652 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3656 /* Gen7's DP voltage swing and pre-emphasis control */
3657 static uint32_t
3658 gen7_edp_signal_levels(uint8_t train_set)
3660 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3661 DP_TRAIN_PRE_EMPHASIS_MASK);
3662 switch (signal_levels) {
3663 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3664 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3665 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3666 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3667 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3668 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3670 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3671 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3672 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3673 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3675 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3676 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3677 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3678 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3680 default:
3681 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3682 "0x%x\n", signal_levels);
3683 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3687 /* Properly updates "DP" with the correct signal levels. */
3688 static void
3689 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3691 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3692 enum port port = intel_dig_port->port;
3693 struct drm_device *dev = intel_dig_port->base.base.dev;
3694 uint32_t signal_levels, mask = 0;
3695 uint8_t train_set = intel_dp->train_set[0];
3697 if (HAS_DDI(dev)) {
3698 signal_levels = ddi_signal_levels(intel_dp);
3700 if (IS_BROXTON(dev))
3701 signal_levels = 0;
3702 else
3703 mask = DDI_BUF_EMP_MASK;
3704 } else if (IS_CHERRYVIEW(dev)) {
3705 signal_levels = chv_signal_levels(intel_dp);
3706 } else if (IS_VALLEYVIEW(dev)) {
3707 signal_levels = vlv_signal_levels(intel_dp);
3708 } else if (IS_GEN7(dev) && port == PORT_A) {
3709 signal_levels = gen7_edp_signal_levels(train_set);
3710 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3711 } else if (IS_GEN6(dev) && port == PORT_A) {
3712 signal_levels = gen6_edp_signal_levels(train_set);
3713 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3714 } else {
3715 signal_levels = gen4_signal_levels(train_set);
3716 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3719 if (mask)
3720 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3722 DRM_DEBUG_KMS("Using vswing level %d\n",
3723 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3724 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3725 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3726 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3728 *DP = (*DP & ~mask) | signal_levels;
3731 static bool
3732 intel_dp_set_link_train(struct intel_dp *intel_dp,
3733 uint32_t *DP,
3734 uint8_t dp_train_pat)
3736 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3737 struct drm_i915_private *dev_priv =
3738 to_i915(intel_dig_port->base.base.dev);
3739 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3740 int ret, len;
3742 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3744 I915_WRITE(intel_dp->output_reg, *DP);
3745 POSTING_READ(intel_dp->output_reg);
3747 buf[0] = dp_train_pat;
3748 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3749 DP_TRAINING_PATTERN_DISABLE) {
3750 /* don't write DP_TRAINING_LANEx_SET on disable */
3751 len = 1;
3752 } else {
3753 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3754 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3755 len = intel_dp->lane_count + 1;
3758 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3759 buf, len);
3761 return ret == len;
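/*
 * DP_TRAINING_PATTERN_SET (DPCD 0x102) is immediately followed by
 * DP_TRAINING_LANE0_SET (DPCD 0x103), which is why buf[] packs the
 * pattern and the per-lane drive settings into a single AUX write.
 */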
3764 static bool
3765 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3766 uint8_t dp_train_pat)
3768 if (!intel_dp->train_set_valid)
3769 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3770 intel_dp_set_signal_levels(intel_dp, DP);
3771 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3774 static bool
3775 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3776 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3778 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3779 struct drm_i915_private *dev_priv =
3780 to_i915(intel_dig_port->base.base.dev);
3781 int ret;
3783 intel_get_adjust_train(intel_dp, link_status);
3784 intel_dp_set_signal_levels(intel_dp, DP);
3786 I915_WRITE(intel_dp->output_reg, *DP);
3787 POSTING_READ(intel_dp->output_reg);
3789 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3790 intel_dp->train_set, intel_dp->lane_count);
3792 return ret == intel_dp->lane_count;
3795 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3797 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3798 struct drm_device *dev = intel_dig_port->base.base.dev;
3799 struct drm_i915_private *dev_priv = dev->dev_private;
3800 enum port port = intel_dig_port->port;
3801 uint32_t val;
3803 if (!HAS_DDI(dev))
3804 return;
3806 val = I915_READ(DP_TP_CTL(port));
3807 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3808 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3809 I915_WRITE(DP_TP_CTL(port), val);
3812 * On PORT_A we can have only eDP in SST mode. There, the only reason
3813 * we need to set idle transmission mode is to work around a HW issue
3814 * where we enable the pipe while not in idle link-training mode.
3815 * In this case we are required to wait for a minimum number of
3816 * idle patterns to be sent.
3818 if (port == PORT_A)
3819 return;
3821 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3822 1))
3823 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3826 /* Enable corresponding port and start training pattern 1 */
3827 static void
3828 intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
3830 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3831 struct drm_device *dev = encoder->dev;
3832 int i;
3833 uint8_t voltage;
3834 int voltage_tries, loop_tries;
3835 uint32_t DP = intel_dp->DP;
3836 uint8_t link_config[2];
3837 uint8_t link_bw, rate_select;
3839 if (HAS_DDI(dev))
3840 intel_ddi_prepare_link_retrain(encoder);
3842 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3843 &link_bw, &rate_select);
3845 /* Write the link configuration data */
3846 link_config[0] = link_bw;
3847 link_config[1] = intel_dp->lane_count;
3848 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3849 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3850 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3851 if (intel_dp->num_sink_rates)
3852 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3853 &rate_select, 1);
3855 link_config[0] = 0;
3856 link_config[1] = DP_SET_ANSI_8B10B;
3857 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3859 DP |= DP_PORT_EN;
3861 /* clock recovery */
3862 if (!intel_dp_reset_link_train(intel_dp, &DP,
3863 DP_TRAINING_PATTERN_1 |
3864 DP_LINK_SCRAMBLING_DISABLE)) {
3865 DRM_ERROR("failed to enable link training\n");
3866 return;
3869 voltage = 0xff;
3870 voltage_tries = 0;
3871 loop_tries = 0;
3872 for (;;) {
3873 uint8_t link_status[DP_LINK_STATUS_SIZE];
3875 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3876 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3877 DRM_ERROR("failed to get link status\n");
3878 break;
3881 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3882 DRM_DEBUG_KMS("clock recovery OK\n");
3883 break;
3887 * if we used previously trained voltage and pre-emphasis values
3888 * and we don't get clock recovery, reset link training values
3890 if (intel_dp->train_set_valid) {
3891 DRM_DEBUG_KMS("clock recovery not ok, reset");
3892 /* clear the flag as we are not reusing train set */
3893 intel_dp->train_set_valid = false;
3894 if (!intel_dp_reset_link_train(intel_dp, &DP,
3895 DP_TRAINING_PATTERN_1 |
3896 DP_LINK_SCRAMBLING_DISABLE)) {
3897 DRM_ERROR("failed to enable link training\n");
3898 return;
3900 continue;
3903 /* Check to see if we've tried the max voltage */
3904 for (i = 0; i < intel_dp->lane_count; i++)
3905 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3906 break;
3907 if (i == intel_dp->lane_count) {
3908 ++loop_tries;
3909 if (loop_tries == 5) {
3910 DRM_ERROR("too many full retries, give up\n");
3911 break;
3913 intel_dp_reset_link_train(intel_dp, &DP,
3914 DP_TRAINING_PATTERN_1 |
3915 DP_LINK_SCRAMBLING_DISABLE);
3916 voltage_tries = 0;
3917 continue;
3920 /* Check to see if we've tried the same voltage 5 times */
3921 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3922 ++voltage_tries;
3923 if (voltage_tries == 5) {
3924 DRM_ERROR("too many voltage retries, give up\n");
3925 break;
3927 } else
3928 voltage_tries = 0;
3929 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3931 /* Update training set as requested by target */
3932 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3933 DRM_ERROR("failed to update link training\n");
3934 break;
3938 intel_dp->DP = DP;
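/*
 * Editor's note: an outline of the clock-recovery loop above, which
 * mirrors the CR sequence of the DP spec:
 *
 *	for (;;) {
 *		wait the CR delay, read the link status;
 *		if clock recovery is OK          -> done;
 *		if a cached train set failed     -> drop it, restart TPS1;
 *		if all lanes hit max swing       -> restart (5 tries max);
 *		if the same vswing came back 5x  -> give up;
 *		write the adjusted drive settings and retry;
 *	}
 */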
3941 static void
3942 intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
3944 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3945 struct drm_device *dev = dig_port->base.base.dev;
3946 bool channel_eq = false;
3947 int tries, cr_tries;
3948 uint32_t DP = intel_dp->DP;
3949 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3952 * Use Training Pattern 3 for HBR2, or for DP 1.2 devices that support it.
3954 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
3955 * also mandatory for downstream devices that support HBR2.
3957 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
3958 * supported but still not enabled.
3960 if (intel_dp_source_supports_hbr2(dev) &&
3961 drm_dp_tps3_supported(intel_dp->dpcd))
3962 training_pattern = DP_TRAINING_PATTERN_3;
3963 else if (intel_dp->link_rate == 540000)
3964 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
3966 /* channel equalization */
3967 if (!intel_dp_set_link_train(intel_dp, &DP,
3968 training_pattern |
3969 DP_LINK_SCRAMBLING_DISABLE)) {
3970 DRM_ERROR("failed to start channel equalization\n");
3971 return;
3974 tries = 0;
3975 cr_tries = 0;
3976 channel_eq = false;
3977 for (;;) {
3978 uint8_t link_status[DP_LINK_STATUS_SIZE];
3980 if (cr_tries > 5) {
3981 DRM_ERROR("failed to train DP, aborting\n");
3982 break;
3985 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3986 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3987 DRM_ERROR("failed to get link status\n");
3988 break;
3991 /* Make sure clock is still ok */
3992 if (!drm_dp_clock_recovery_ok(link_status,
3993 intel_dp->lane_count)) {
3994 intel_dp->train_set_valid = false;
3995 intel_dp_link_training_clock_recovery(intel_dp);
3996 intel_dp_set_link_train(intel_dp, &DP,
3997 training_pattern |
3998 DP_LINK_SCRAMBLING_DISABLE);
3999 cr_tries++;
4000 continue;
4003 if (drm_dp_channel_eq_ok(link_status,
4004 intel_dp->lane_count)) {
4005 channel_eq = true;
4006 break;
4009 /* Try 5 times, then try clock recovery if that fails */
4010 if (tries > 5) {
4011 intel_dp->train_set_valid = false;
4012 intel_dp_link_training_clock_recovery(intel_dp);
4013 intel_dp_set_link_train(intel_dp, &DP,
4014 training_pattern |
4015 DP_LINK_SCRAMBLING_DISABLE);
4016 tries = 0;
4017 cr_tries++;
4018 continue;
4021 /* Update training set as requested by target */
4022 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
4023 DRM_ERROR("failed to update link training\n");
4024 break;
4026 ++tries;
4029 intel_dp_set_idle_link_train(intel_dp);
4031 intel_dp->DP = DP;
4033 if (channel_eq) {
4034 intel_dp->train_set_valid = true;
4035 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4039 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
4041 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
4042 DP_TRAINING_PATTERN_DISABLE);
4045 void
4046 intel_dp_start_link_train(struct intel_dp *intel_dp)
4048 intel_dp_link_training_clock_recovery(intel_dp);
4049 intel_dp_link_training_channel_equalization(intel_dp);
4052 static void
4053 intel_dp_link_down(struct intel_dp *intel_dp)
4055 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4056 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
4057 enum port port = intel_dig_port->port;
4058 struct drm_device *dev = intel_dig_port->base.base.dev;
4059 struct drm_i915_private *dev_priv = dev->dev_private;
4060 uint32_t DP = intel_dp->DP;
4062 if (WARN_ON(HAS_DDI(dev)))
4063 return;
4065 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
4066 return;
4068 DRM_DEBUG_KMS("\n");
4070 if ((IS_GEN7(dev) && port == PORT_A) ||
4071 (HAS_PCH_CPT(dev) && port != PORT_A)) {
4072 DP &= ~DP_LINK_TRAIN_MASK_CPT;
4073 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
4074 } else {
4075 if (IS_CHERRYVIEW(dev))
4076 DP &= ~DP_LINK_TRAIN_MASK_CHV;
4077 else
4078 DP &= ~DP_LINK_TRAIN_MASK;
4079 DP |= DP_LINK_TRAIN_PAT_IDLE;
4081 I915_WRITE(intel_dp->output_reg, DP);
4082 POSTING_READ(intel_dp->output_reg);
4084 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
4085 I915_WRITE(intel_dp->output_reg, DP);
4086 POSTING_READ(intel_dp->output_reg);
4089 * HW workaround for IBX, we need to move the port
4090 * to transcoder A after disabling it to allow the
4091 * matching HDMI port to be enabled on transcoder A.
4093 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
4094 /* always enable with pattern 1 (as per spec) */
4095 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
4096 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
4097 I915_WRITE(intel_dp->output_reg, DP);
4098 POSTING_READ(intel_dp->output_reg);
4100 DP &= ~DP_PORT_EN;
4101 I915_WRITE(intel_dp->output_reg, DP);
4102 POSTING_READ(intel_dp->output_reg);
4105 msleep(intel_dp->panel_power_down_delay);
4108 static bool
4109 intel_dp_get_dpcd(struct intel_dp *intel_dp)
4111 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4112 struct drm_device *dev = dig_port->base.base.dev;
4113 struct drm_i915_private *dev_priv = dev->dev_private;
4114 uint8_t rev;
4116 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
4117 sizeof(intel_dp->dpcd)) < 0)
4118 return false; /* aux transfer failed */
4120 #ifdef __DragonFly__
4121 char dpcd_hex_dump[DP_RECEIVER_CAP_SIZE * 3];
4122 DRM_DEBUG_KMS("DPCD: %s\n", hexncpy(intel_dp->dpcd, sizeof(intel_dp->dpcd),
4123 dpcd_hex_dump, sizeof(dpcd_hex_dump), " "));
4124 #else
4125 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
4126 #endif
4128 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
4129 return false; /* DPCD not present */
4131 /* Check if the panel supports PSR */
4132 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
4133 if (is_edp(intel_dp)) {
4134 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
4135 intel_dp->psr_dpcd,
4136 sizeof(intel_dp->psr_dpcd));
4137 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
4138 dev_priv->psr.sink_support = true;
4139 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
4142 if (INTEL_INFO(dev)->gen >= 9 &&
4143 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4144 uint8_t frame_sync_cap;
4146 dev_priv->psr.sink_support = true;
4147 intel_dp_dpcd_read_wake(&intel_dp->aux,
4148 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4149 &frame_sync_cap, 1);
4150 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4151 /* PSR2 needs frame sync as well */
4152 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
4153 DRM_DEBUG_KMS("PSR2 %s on sink",
4154 dev_priv->psr.psr2_support ? "supported" : "not supported");
4158 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4159 yesno(intel_dp_source_supports_hbr2(dev)),
4160 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
4162 /* Intermediate frequency support */
4163 if (is_edp(intel_dp) &&
4164 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4165 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4166 (rev >= 0x03)) { /* eDP v1.4 or higher */
4167 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
4168 int i;
4170 intel_dp_dpcd_read_wake(&intel_dp->aux,
4171 DP_SUPPORTED_LINK_RATES,
4172 sink_rates,
4173 sizeof(sink_rates));
4175 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4176 int val = le16_to_cpu(sink_rates[i]);
4178 if (val == 0)
4179 break;
4181 /* DPCD value is in units of 200 kHz; the driver stores link rates in deca-kHz */
4182 intel_dp->sink_rates[i] = (val * 200) / 10;
4184 intel_dp->num_sink_rates = i;
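/*
 * Editor's note: DP_SUPPORTED_LINK_RATES entries are little-endian words
 * in units of 200 kHz, while sink_rates[] uses the deca-kHz convention
 * of the rest of this file. A worked example for an HBR (2.7 GHz) sink:
 *
 *	val = 13500;			// 13500 * 200 kHz = 2.7 GHz
 *	(val * 200) / 10;		// -> 270000 deca-kHz
 */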
4187 intel_dp_print_rates(intel_dp);
4189 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4190 DP_DWN_STRM_PORT_PRESENT))
4191 return true; /* native DP sink */
4193 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4194 return true; /* no per-port downstream info */
4196 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4197 intel_dp->downstream_ports,
4198 DP_MAX_DOWNSTREAM_PORTS) < 0)
4199 return false; /* downstream port status fetch failed */
4201 return true;
4204 static void
4205 intel_dp_probe_oui(struct intel_dp *intel_dp)
4207 u8 buf[3];
4209 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4210 return;
4212 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4213 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4214 buf[0], buf[1], buf[2]);
4216 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4217 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4218 buf[0], buf[1], buf[2]);
4221 static bool
4222 intel_dp_probe_mst(struct intel_dp *intel_dp)
4224 u8 buf[1];
4226 if (!intel_dp->can_mst)
4227 return false;
4229 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4230 return false;
4232 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4233 if (buf[0] & DP_MST_CAP) {
4234 DRM_DEBUG_KMS("Sink is MST capable\n");
4235 intel_dp->is_mst = true;
4236 } else {
4237 DRM_DEBUG_KMS("Sink is not MST capable\n");
4238 intel_dp->is_mst = false;
4242 #if 0
4243 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4244 return intel_dp->is_mst;
4245 #else
4246 return false;
4247 #endif
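/*
 * Editor's note: with the #if 0 above, this port of the driver never
 * hands the sink over to the MST topology manager; the capability is
 * still logged, but the function always returns false so callers keep
 * treating the sink as SST.
 */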
4250 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4252 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4253 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4254 u8 buf;
4255 int ret = 0;
4257 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4258 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4259 ret = -EIO;
4260 goto out;
4263 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4264 buf & ~DP_TEST_SINK_START) < 0) {
4265 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4266 ret = -EIO;
4267 goto out;
4270 intel_dp->sink_crc.started = false;
4271 out:
4272 hsw_enable_ips(intel_crtc);
4273 return ret;
4276 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4278 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4279 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4280 u8 buf;
4281 int ret;
4283 if (intel_dp->sink_crc.started) {
4284 ret = intel_dp_sink_crc_stop(intel_dp);
4285 if (ret)
4286 return ret;
4289 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4290 return -EIO;
4292 if (!(buf & DP_TEST_CRC_SUPPORTED))
4293 return -ENOTTY;
4295 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4297 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4298 return -EIO;
4300 hsw_disable_ips(intel_crtc);
4302 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4303 buf | DP_TEST_SINK_START) < 0) {
4304 hsw_enable_ips(intel_crtc);
4305 return -EIO;
4308 intel_dp->sink_crc.started = true;
4309 return 0;
4312 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4314 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4315 struct drm_device *dev = dig_port->base.base.dev;
4316 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4317 u8 buf;
4318 int count, ret;
4319 int attempts = 6;
4320 bool old_equal_new;
4322 ret = intel_dp_sink_crc_start(intel_dp);
4323 if (ret)
4324 return ret;
4326 do {
4327 intel_wait_for_vblank(dev, intel_crtc->pipe);
4329 if (drm_dp_dpcd_readb(&intel_dp->aux,
4330 DP_TEST_SINK_MISC, &buf) < 0) {
4331 ret = -EIO;
4332 goto stop;
4334 count = buf & DP_TEST_COUNT_MASK;
4337 * Count might be reset during the loop. In this case the
4338 * last known count needs to be reset as well.
4340 if (count == 0)
4341 intel_dp->sink_crc.last_count = 0;
4343 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4344 ret = -EIO;
4345 goto stop;
4348 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4349 !memcmp(intel_dp->sink_crc.last_crc, crc,
4350 6 * sizeof(u8)));
4352 } while (--attempts && (count == 0 || old_equal_new));
4354 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4355 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4357 if (attempts == 0) {
4358 if (old_equal_new) {
4359 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4360 } else {
4361 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4362 ret = -ETIMEDOUT;
4363 goto stop;
4367 stop:
4368 intel_dp_sink_crc_stop(intel_dp);
4369 return ret;
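/*
 * Editor's note: the loop above polls TEST_SINK_MISC once per vblank for
 * up to 6 frames, waiting while the sink's CRC count is still 0 (no CRC
 * computed yet) or while both the count and the 6-byte CRC match the
 * previous read (counter stuck). On exhaustion it distinguishes the two:
 * a stuck-but-present CRC is only logged as unreliable, while no CRC at
 * all is a hard -ETIMEDOUT.
 */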
4372 static bool
4373 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4375 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4376 DP_DEVICE_SERVICE_IRQ_VECTOR,
4377 sink_irq_vector, 1) == 1;
4380 #if 0
4381 static bool
4382 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4384 int ret;
4386 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4387 DP_SINK_COUNT_ESI,
4388 sink_irq_vector, 14);
4389 if (ret != 14)
4390 return false;
4392 return true;
4394 #endif
4396 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4398 uint8_t test_result = DP_TEST_ACK;
4399 return test_result;
4402 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4404 uint8_t test_result = DP_TEST_NAK;
4405 return test_result;
4408 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4410 uint8_t test_result = DP_TEST_NAK;
4411 struct intel_connector *intel_connector = intel_dp->attached_connector;
4412 struct drm_connector *connector = &intel_connector->base;
4414 if (intel_connector->detect_edid == NULL ||
4415 connector->edid_corrupt ||
4416 intel_dp->aux.i2c_defer_count > 6) {
4417 /* Check EDID read for NACKs, DEFERs and corruption
4418 * (DP CTS 1.2 Core r1.1)
4419 * 4.2.2.4 : Failed EDID read, I2C_NAK
4420 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4421 * 4.2.2.6 : EDID corruption detected
4422 * Use failsafe mode for all cases
4424 if (intel_dp->aux.i2c_nack_count > 0 ||
4425 intel_dp->aux.i2c_defer_count > 0)
4426 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4427 intel_dp->aux.i2c_nack_count,
4428 intel_dp->aux.i2c_defer_count);
4429 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4430 } else {
4431 struct edid *block = intel_connector->detect_edid;
4433 /* We have to write the checksum
4434 * of the last block read
4436 block += intel_connector->detect_edid->extensions;
4438 if (!drm_dp_dpcd_write(&intel_dp->aux,
4439 DP_TEST_EDID_CHECKSUM,
4440 &block->checksum,
4441 1))
4442 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4444 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4445 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4448 /* Set test active flag here so userspace doesn't interrupt things */
4449 intel_dp->compliance_test_active = 1;
4451 return test_result;
4454 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4456 uint8_t test_result = DP_TEST_NAK;
4457 return test_result;
4460 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4462 uint8_t response = DP_TEST_NAK;
4463 uint8_t rxdata = 0;
4464 int status = 0;
4466 intel_dp->compliance_test_active = 0;
4467 intel_dp->compliance_test_type = 0;
4468 intel_dp->compliance_test_data = 0;
4470 intel_dp->aux.i2c_nack_count = 0;
4471 intel_dp->aux.i2c_defer_count = 0;
4473 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4474 if (status <= 0) {
4475 DRM_DEBUG_KMS("Could not read test request from sink\n");
4476 goto update_status;
4479 switch (rxdata) {
4480 case DP_TEST_LINK_TRAINING:
4481 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4482 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4483 response = intel_dp_autotest_link_training(intel_dp);
4484 break;
4485 case DP_TEST_LINK_VIDEO_PATTERN:
4486 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4487 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4488 response = intel_dp_autotest_video_pattern(intel_dp);
4489 break;
4490 case DP_TEST_LINK_EDID_READ:
4491 DRM_DEBUG_KMS("EDID test requested\n");
4492 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4493 response = intel_dp_autotest_edid(intel_dp);
4494 break;
4495 case DP_TEST_LINK_PHY_TEST_PATTERN:
4496 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4497 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4498 response = intel_dp_autotest_phy_pattern(intel_dp);
4499 break;
4500 default:
4501 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4502 break;
4505 update_status:
4506 status = drm_dp_dpcd_write(&intel_dp->aux,
4507 DP_TEST_RESPONSE,
4508 &response, 1);
4509 if (status <= 0)
4510 DRM_DEBUG_KMS("Could not write test response to sink\n");
4513 #if 0
4514 static int
4515 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4517 bool bret;
4519 if (intel_dp->is_mst) {
4520 u8 esi[16] = { 0 };
4521 int ret = 0;
4522 int retry;
4523 bool handled;
4524 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4525 go_again:
4526 if (bret == true) {
4528 /* check link status - esi[10] = 0x200c */
4529 if (intel_dp->active_mst_links &&
4530 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4531 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4532 intel_dp_start_link_train(intel_dp);
4533 intel_dp_stop_link_train(intel_dp);
4536 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4537 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4539 if (handled) {
4540 for (retry = 0; retry < 3; retry++) {
4541 int wret;
4542 wret = drm_dp_dpcd_write(&intel_dp->aux,
4543 DP_SINK_COUNT_ESI+1,
4544 &esi[1], 3);
4545 if (wret == 3) {
4546 break;
4550 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4551 if (bret == true) {
4552 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4553 goto go_again;
4555 } else
4556 ret = 0;
4558 return ret;
4559 } else {
4560 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4561 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4562 intel_dp->is_mst = false;
4563 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4564 /* send a hotplug event */
4565 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4568 return -EINVAL;
4570 #endif
4573 * According to DP spec
4574 * 5.1.2:
4575 * 1. Read DPCD
4576 * 2. Configure link according to Receiver Capabilities
4577 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4578 * 4. Check link status on receipt of hot-plug interrupt
4580 static void
4581 intel_dp_check_link_status(struct intel_dp *intel_dp)
4583 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4584 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4585 u8 sink_irq_vector;
4586 u8 link_status[DP_LINK_STATUS_SIZE];
4588 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4590 if (!intel_encoder->base.crtc)
4591 return;
4593 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4594 return;
4596 /* Try to read receiver status if the link appears to be up */
4597 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4598 return;
4601 /* Now read the DPCD to see if it's actually running */
4602 if (!intel_dp_get_dpcd(intel_dp)) {
4603 return;
4606 /* Try to read the source of the interrupt */
4607 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4608 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4609 /* Clear interrupt source */
4610 drm_dp_dpcd_writeb(&intel_dp->aux,
4611 DP_DEVICE_SERVICE_IRQ_VECTOR,
4612 sink_irq_vector);
4614 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4615 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4616 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4617 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4620 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4621 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4622 intel_encoder->base.name);
4623 intel_dp_start_link_train(intel_dp);
4624 intel_dp_stop_link_train(intel_dp);
4628 /* XXX this is probably wrong for multiple downstream ports */
4629 static enum drm_connector_status
4630 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4632 uint8_t *dpcd = intel_dp->dpcd;
4633 uint8_t type;
4635 if (!intel_dp_get_dpcd(intel_dp))
4636 return connector_status_disconnected;
4638 /* if there's no downstream port, we're done */
4639 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4640 return connector_status_connected;
4642 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4643 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4644 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4645 uint8_t reg;
4647 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4648 &reg, 1) < 0)
4649 return connector_status_unknown;
4651 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4652 : connector_status_disconnected;
4655 /* If no HPD, poke DDC gently */
4656 if (drm_probe_ddc(intel_dp->aux.ddc))
4657 return connector_status_connected;
4659 /* Well we tried, say unknown for unreliable port types */
4660 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4661 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4662 if (type == DP_DS_PORT_TYPE_VGA ||
4663 type == DP_DS_PORT_TYPE_NON_EDID)
4664 return connector_status_unknown;
4665 } else {
4666 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4667 DP_DWN_STRM_PORT_TYPE_MASK;
4668 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4669 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4670 return connector_status_unknown;
4673 /* Anything else is out of spec, warn and ignore */
4674 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4675 return connector_status_disconnected;
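/*
 * Editor's note: the decision ladder above, in order: with no branch
 * device a successful DPCD read alone proves a sink; an HPD-capable
 * branch exposes a live SINK_COUNT, which is authoritative; otherwise
 * DDC is probed gently; and if even that fails, VGA/NON_EDID downstream
 * ports are reported as unknown rather than disconnected, since they may
 * legitimately have no DDC.
 */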
4678 static enum drm_connector_status
4679 edp_detect(struct intel_dp *intel_dp)
4681 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4682 enum drm_connector_status status;
4684 status = intel_panel_detect(dev);
4685 if (status == connector_status_unknown)
4686 status = connector_status_connected;
4688 return status;
4691 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4692 struct intel_digital_port *port)
4694 u32 bit;
4696 switch (port->port) {
4697 case PORT_A:
4698 return true;
4699 case PORT_B:
4700 bit = SDE_PORTB_HOTPLUG;
4701 break;
4702 case PORT_C:
4703 bit = SDE_PORTC_HOTPLUG;
4704 break;
4705 case PORT_D:
4706 bit = SDE_PORTD_HOTPLUG;
4707 break;
4708 default:
4709 MISSING_CASE(port->port);
4710 return false;
4713 return I915_READ(SDEISR) & bit;
4716 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4717 struct intel_digital_port *port)
4719 u32 bit;
4721 switch (port->port) {
4722 case PORT_A:
4723 return true;
4724 case PORT_B:
4725 bit = SDE_PORTB_HOTPLUG_CPT;
4726 break;
4727 case PORT_C:
4728 bit = SDE_PORTC_HOTPLUG_CPT;
4729 break;
4730 case PORT_D:
4731 bit = SDE_PORTD_HOTPLUG_CPT;
4732 break;
4733 case PORT_E:
4734 bit = SDE_PORTE_HOTPLUG_SPT;
4735 break;
4736 default:
4737 MISSING_CASE(port->port);
4738 return false;
4741 return I915_READ(SDEISR) & bit;
4744 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4745 struct intel_digital_port *port)
4747 u32 bit;
4749 switch (port->port) {
4750 case PORT_B:
4751 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4752 break;
4753 case PORT_C:
4754 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4755 break;
4756 case PORT_D:
4757 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4758 break;
4759 default:
4760 MISSING_CASE(port->port);
4761 return false;
4764 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4767 static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4768 struct intel_digital_port *port)
4770 u32 bit;
4772 switch (port->port) {
4773 case PORT_B:
4774 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4775 break;
4776 case PORT_C:
4777 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4778 break;
4779 case PORT_D:
4780 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4781 break;
4782 default:
4783 MISSING_CASE(port->port);
4784 return false;
4787 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4790 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4791 struct intel_digital_port *intel_dig_port)
4793 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4794 enum port port;
4795 u32 bit;
4797 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4798 switch (port) {
4799 case PORT_A:
4800 bit = BXT_DE_PORT_HP_DDIA;
4801 break;
4802 case PORT_B:
4803 bit = BXT_DE_PORT_HP_DDIB;
4804 break;
4805 case PORT_C:
4806 bit = BXT_DE_PORT_HP_DDIC;
4807 break;
4808 default:
4809 MISSING_CASE(port);
4810 return false;
4813 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4817 * intel_digital_port_connected - is the specified port connected?
4818 * @dev_priv: i915 private structure
4819 * @port: the port to test
4821 * Return %true if @port is connected, %false otherwise.
4823 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4824 struct intel_digital_port *port)
4826 if (HAS_PCH_IBX(dev_priv))
4827 return ibx_digital_port_connected(dev_priv, port);
4828 if (HAS_PCH_SPLIT(dev_priv))
4829 return cpt_digital_port_connected(dev_priv, port);
4830 else if (IS_BROXTON(dev_priv))
4831 return bxt_digital_port_connected(dev_priv, port);
4832 else if (IS_VALLEYVIEW(dev_priv))
4833 return vlv_digital_port_connected(dev_priv, port);
4834 else
4835 return g4x_digital_port_connected(dev_priv, port);
4838 static enum drm_connector_status
4839 ironlake_dp_detect(struct intel_dp *intel_dp)
4841 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4842 struct drm_i915_private *dev_priv = dev->dev_private;
4843 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4845 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4846 return connector_status_disconnected;
4848 return intel_dp_detect_dpcd(intel_dp);
4851 static enum drm_connector_status
4852 g4x_dp_detect(struct intel_dp *intel_dp)
4854 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4855 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4857 /* Can't disconnect eDP, but you can close the lid... */
4858 if (is_edp(intel_dp)) {
4859 enum drm_connector_status status;
4861 status = intel_panel_detect(dev);
4862 if (status == connector_status_unknown)
4863 status = connector_status_connected;
4864 return status;
4867 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4868 return connector_status_disconnected;
4870 return intel_dp_detect_dpcd(intel_dp);
4873 static struct edid *
4874 intel_dp_get_edid(struct intel_dp *intel_dp)
4876 struct intel_connector *intel_connector = intel_dp->attached_connector;
4878 /* use cached edid if we have one */
4879 if (intel_connector->edid) {
4880 /* invalid edid */
4881 if (IS_ERR(intel_connector->edid))
4882 return NULL;
4884 return drm_edid_duplicate(intel_connector->edid);
4885 } else
4886 return drm_get_edid(&intel_connector->base,
4887 intel_dp->aux.ddc);
4890 static void
4891 intel_dp_set_edid(struct intel_dp *intel_dp)
4893 struct intel_connector *intel_connector = intel_dp->attached_connector;
4894 struct edid *edid;
4896 edid = intel_dp_get_edid(intel_dp);
4897 intel_connector->detect_edid = edid;
4899 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4900 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4901 else
4902 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4905 static void
4906 intel_dp_unset_edid(struct intel_dp *intel_dp)
4908 struct intel_connector *intel_connector = intel_dp->attached_connector;
4910 kfree(intel_connector->detect_edid);
4911 intel_connector->detect_edid = NULL;
4913 intel_dp->has_audio = false;
4916 static enum drm_connector_status
4917 intel_dp_detect(struct drm_connector *connector, bool force)
4919 struct intel_dp *intel_dp = intel_attached_dp(connector);
4920 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4921 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4922 struct drm_device *dev = connector->dev;
4923 enum drm_connector_status status;
4924 enum intel_display_power_domain power_domain;
4925 bool ret;
4926 u8 sink_irq_vector;
4928 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4929 connector->base.id, connector->name);
4930 intel_dp_unset_edid(intel_dp);
4932 if (intel_dp->is_mst) {
4933 /* MST devices are disconnected from a monitor POV */
4934 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4935 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4936 return connector_status_disconnected;
4939 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4940 intel_display_power_get(to_i915(dev), power_domain);
4942 /* Can't disconnect eDP, but you can close the lid... */
4943 if (is_edp(intel_dp))
4944 status = edp_detect(intel_dp);
4945 else if (HAS_PCH_SPLIT(dev))
4946 status = ironlake_dp_detect(intel_dp);
4947 else
4948 status = g4x_dp_detect(intel_dp);
4949 if (status != connector_status_connected)
4950 goto out;
4952 intel_dp_probe_oui(intel_dp);
4954 ret = intel_dp_probe_mst(intel_dp);
4955 if (ret) {
4956 /* if we are in MST mode then this connector
4957 won't appear connected or have anything with EDID on it */
4958 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4959 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4960 status = connector_status_disconnected;
4961 goto out;
4964 intel_dp_set_edid(intel_dp);
4966 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4967 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4968 status = connector_status_connected;
4970 /* Try to read the source of the interrupt */
4971 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4972 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4973 /* Clear interrupt source */
4974 drm_dp_dpcd_writeb(&intel_dp->aux,
4975 DP_DEVICE_SERVICE_IRQ_VECTOR,
4976 sink_irq_vector);
4978 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4979 intel_dp_handle_test_request(intel_dp);
4980 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4981 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4984 out:
4985 intel_display_power_put(to_i915(dev), power_domain);
4986 return status;
4989 static void
4990 intel_dp_force(struct drm_connector *connector)
4992 struct intel_dp *intel_dp = intel_attached_dp(connector);
4993 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4994 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4995 enum intel_display_power_domain power_domain;
4997 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4998 connector->base.id, connector->name);
4999 intel_dp_unset_edid(intel_dp);
5001 if (connector->status != connector_status_connected)
5002 return;
5004 power_domain = intel_display_port_aux_power_domain(intel_encoder);
5005 intel_display_power_get(dev_priv, power_domain);
5007 intel_dp_set_edid(intel_dp);
5009 intel_display_power_put(dev_priv, power_domain);
5011 if (intel_encoder->type != INTEL_OUTPUT_EDP)
5012 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
5015 static int intel_dp_get_modes(struct drm_connector *connector)
5017 struct intel_connector *intel_connector = to_intel_connector(connector);
5018 struct edid *edid;
5020 edid = intel_connector->detect_edid;
5021 if (edid) {
5022 int ret = intel_connector_update_modes(connector, edid);
5023 if (ret)
5024 return ret;
5027 /* if eDP has no EDID, fall back to fixed mode */
5028 if (is_edp(intel_attached_dp(connector)) &&
5029 intel_connector->panel.fixed_mode) {
5030 struct drm_display_mode *mode;
5032 mode = drm_mode_duplicate(connector->dev,
5033 intel_connector->panel.fixed_mode);
5034 if (mode) {
5035 drm_mode_probed_add(connector, mode);
5036 return 1;
5040 return 0;
5043 static bool
5044 intel_dp_detect_audio(struct drm_connector *connector)
5046 bool has_audio = false;
5047 struct edid *edid;
5049 edid = to_intel_connector(connector)->detect_edid;
5050 if (edid)
5051 has_audio = drm_detect_monitor_audio(edid);
5053 return has_audio;
5056 static int
5057 intel_dp_set_property(struct drm_connector *connector,
5058 struct drm_property *property,
5059 uint64_t val)
5061 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5062 struct intel_connector *intel_connector = to_intel_connector(connector);
5063 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
5064 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5065 int ret;
5067 ret = drm_object_property_set_value(&connector->base, property, val);
5068 if (ret)
5069 return ret;
5071 if (property == dev_priv->force_audio_property) {
5072 int i = val;
5073 bool has_audio;
5075 if (i == intel_dp->force_audio)
5076 return 0;
5078 intel_dp->force_audio = i;
5080 if (i == HDMI_AUDIO_AUTO)
5081 has_audio = intel_dp_detect_audio(connector);
5082 else
5083 has_audio = (i == HDMI_AUDIO_ON);
5085 if (has_audio == intel_dp->has_audio)
5086 return 0;
5088 intel_dp->has_audio = has_audio;
5089 goto done;
5092 if (property == dev_priv->broadcast_rgb_property) {
5093 bool old_auto = intel_dp->color_range_auto;
5094 bool old_range = intel_dp->limited_color_range;
5096 switch (val) {
5097 case INTEL_BROADCAST_RGB_AUTO:
5098 intel_dp->color_range_auto = true;
5099 break;
5100 case INTEL_BROADCAST_RGB_FULL:
5101 intel_dp->color_range_auto = false;
5102 intel_dp->limited_color_range = false;
5103 break;
5104 case INTEL_BROADCAST_RGB_LIMITED:
5105 intel_dp->color_range_auto = false;
5106 intel_dp->limited_color_range = true;
5107 break;
5108 default:
5109 return -EINVAL;
5112 if (old_auto == intel_dp->color_range_auto &&
5113 old_range == intel_dp->limited_color_range)
5114 return 0;
5116 goto done;
5119 if (is_edp(intel_dp) &&
5120 property == connector->dev->mode_config.scaling_mode_property) {
5121 if (val == DRM_MODE_SCALE_NONE) {
5122 DRM_DEBUG_KMS("no scaling not supported\n");
5123 return -EINVAL;
5126 if (intel_connector->panel.fitting_mode == val) {
5127 /* the eDP scaling property is not changed */
5128 return 0;
5130 intel_connector->panel.fitting_mode = val;
5132 goto done;
5135 return -EINVAL;
5137 done:
5138 if (intel_encoder->base.crtc)
5139 intel_crtc_restore_mode(intel_encoder->base.crtc);
5141 return 0;
5144 static void
5145 intel_dp_connector_destroy(struct drm_connector *connector)
5147 struct intel_connector *intel_connector = to_intel_connector(connector);
5149 kfree(intel_connector->detect_edid);
5151 if (!IS_ERR_OR_NULL(intel_connector->edid))
5152 kfree(intel_connector->edid);
5154 /* Can't call is_edp() since the encoder may have been destroyed
5155 * already. */
5156 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5157 intel_panel_fini(&intel_connector->panel);
5159 drm_connector_cleanup(connector);
5160 kfree(connector);
5163 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5165 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5166 struct intel_dp *intel_dp = &intel_dig_port->dp;
5168 drm_dp_aux_unregister(&intel_dp->aux);
5169 intel_dp_mst_encoder_cleanup(intel_dig_port);
5170 if (is_edp(intel_dp)) {
5171 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5173 * vdd might still be enabled due to the delayed vdd off.
5174 * Make sure vdd is actually turned off here.
5176 pps_lock(intel_dp);
5177 edp_panel_vdd_off_sync(intel_dp);
5178 pps_unlock(intel_dp);
5180 #if 0
5181 if (intel_dp->edp_notifier.notifier_call) {
5182 unregister_reboot_notifier(&intel_dp->edp_notifier);
5183 intel_dp->edp_notifier.notifier_call = NULL;
5185 #endif
5187 drm_encoder_cleanup(encoder);
5188 kfree(intel_dig_port);
5191 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5193 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5195 if (!is_edp(intel_dp))
5196 return;
5199 * vdd might still be enabled due to the delayed vdd off.
5200 * Make sure vdd is actually turned off here.
5202 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5203 pps_lock(intel_dp);
5204 edp_panel_vdd_off_sync(intel_dp);
5205 pps_unlock(intel_dp);
5208 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5210 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5211 struct drm_device *dev = intel_dig_port->base.base.dev;
5212 struct drm_i915_private *dev_priv = dev->dev_private;
5213 enum intel_display_power_domain power_domain;
5215 lockdep_assert_held(&dev_priv->pps_mutex);
5217 if (!edp_have_panel_vdd(intel_dp))
5218 return;
5221 * The VDD bit needs a power domain reference, so if the bit is
5222 * already enabled when we boot or resume, grab this reference and
5223 * schedule a vdd off, so we don't hold on to the reference
5224 * indefinitely.
5226 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5227 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
5228 intel_display_power_get(dev_priv, power_domain);
5230 edp_panel_vdd_schedule_off(intel_dp);
5233 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5235 struct intel_dp *intel_dp;
5237 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5238 return;
5240 intel_dp = enc_to_intel_dp(encoder);
5242 pps_lock(intel_dp);
5245 * Read out the current power sequencer assignment,
5246 * in case the BIOS did something with it.
5248 if (IS_VALLEYVIEW(encoder->dev))
5249 vlv_initial_power_sequencer_setup(intel_dp);
5251 intel_edp_panel_vdd_sanitize(intel_dp);
5253 pps_unlock(intel_dp);
5256 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5257 .dpms = drm_atomic_helper_connector_dpms,
5258 .detect = intel_dp_detect,
5259 .force = intel_dp_force,
5260 .fill_modes = drm_helper_probe_single_connector_modes,
5261 .set_property = intel_dp_set_property,
5262 .atomic_get_property = intel_connector_atomic_get_property,
5263 .destroy = intel_dp_connector_destroy,
5264 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5265 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5268 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5269 .get_modes = intel_dp_get_modes,
5270 .mode_valid = intel_dp_mode_valid,
5271 .best_encoder = intel_best_encoder,
5274 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5275 .reset = intel_dp_encoder_reset,
5276 .destroy = intel_dp_encoder_destroy,
5279 bool
5280 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5282 struct intel_dp *intel_dp = &intel_dig_port->dp;
5283 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5284 struct drm_device *dev = intel_dig_port->base.base.dev;
5285 struct drm_i915_private *dev_priv = dev->dev_private;
5286 enum intel_display_power_domain power_domain;
5287 bool ret = true;
5289 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5290 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5291 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5293 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5295 * vdd off can generate a long pulse on eDP which
5296 * would require vdd on to handle it, and thus we
5297 * would end up in an endless cycle of
5298 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5300 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5301 port_name(intel_dig_port->port));
5302 return false;
5305 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5306 port_name(intel_dig_port->port),
5307 long_hpd ? "long" : "short");
5309 power_domain = intel_display_port_aux_power_domain(intel_encoder);
5310 intel_display_power_get(dev_priv, power_domain);
5312 if (long_hpd) {
5313 /* indicate that we need to restart link training */
5314 intel_dp->train_set_valid = false;
5316 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5317 goto mst_fail;
5319 if (!intel_dp_get_dpcd(intel_dp)) {
5320 goto mst_fail;
5323 intel_dp_probe_oui(intel_dp);
5325 if (!intel_dp_probe_mst(intel_dp)) {
5326 goto mst_fail;
5328 } else {
5329 if (intel_dp->is_mst) {
5330 #if 0
5331 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5332 goto mst_fail;
5333 #endif
5336 if (!intel_dp->is_mst) {
5337 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5338 intel_dp_check_link_status(intel_dp);
5339 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5343 ret = false;
5345 goto put_power;
5346 mst_fail:
5347 /* if we were in MST mode and the device is no longer there, get out of MST mode */
5348 if (intel_dp->is_mst) {
5349 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5350 intel_dp->is_mst = false;
5351 #if 0
5352 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5353 #endif
5355 put_power:
5356 intel_display_power_put(dev_priv, power_domain);
5358 return ret;
5361 /* Return which DP Port should be selected for Transcoder DP control */
5362 int
5363 intel_trans_dp_port_sel(struct drm_crtc *crtc)
5365 struct drm_device *dev = crtc->dev;
5366 struct intel_encoder *intel_encoder;
5367 struct intel_dp *intel_dp;
5369 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5370 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5372 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5373 intel_encoder->type == INTEL_OUTPUT_EDP)
5374 return intel_dp->output_reg;
5377 return -1;
5380 /* check the VBT to see whether the eDP is on another port */
5381 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5383 struct drm_i915_private *dev_priv = dev->dev_private;
5384 union child_device_config *p_child;
5385 int i;
5386 static const short port_mapping[] = {
5387 [PORT_B] = DVO_PORT_DPB,
5388 [PORT_C] = DVO_PORT_DPC,
5389 [PORT_D] = DVO_PORT_DPD,
5390 [PORT_E] = DVO_PORT_DPE,
5394 * eDP is not supported on g4x, so bail out early for a bit of
5395 * extra safety in case the VBT is bonkers.
5397 if (INTEL_INFO(dev)->gen < 5)
5398 return false;
5400 if (port == PORT_A)
5401 return true;
5403 if (!dev_priv->vbt.child_dev_num)
5404 return false;
5406 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5407 p_child = dev_priv->vbt.child_dev + i;
5409 if (p_child->common.dvo_port == port_mapping[port] &&
5410 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5411 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5412 return true;
5414 return false;
5417 void
5418 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5420 struct intel_connector *intel_connector = to_intel_connector(connector);
5422 intel_attach_force_audio_property(connector);
5423 intel_attach_broadcast_rgb_property(connector);
5424 intel_dp->color_range_auto = true;
5426 if (is_edp(intel_dp)) {
5427 drm_mode_create_scaling_mode_property(connector->dev);
5428 drm_object_attach_property(
5429 &connector->base,
5430 connector->dev->mode_config.scaling_mode_property,
5431 DRM_MODE_SCALE_ASPECT);
5432 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5436 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5438 intel_dp->last_power_cycle = jiffies;
5439 intel_dp->last_power_on = jiffies;
5440 intel_dp->last_backlight_off = jiffies;
5443 static void
5444 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5445 struct intel_dp *intel_dp)
5447 struct drm_i915_private *dev_priv = dev->dev_private;
5448 struct edp_power_seq cur, vbt, spec,
5449 *final = &intel_dp->pps_delays;
5450 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5451 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5453 lockdep_assert_held(&dev_priv->pps_mutex);
5455 /* already initialized? */
5456 if (final->t11_t12 != 0)
5457 return;
5459 if (IS_BROXTON(dev)) {
5461 * TODO: BXT has 2 sets of PPS registers.
5462 * The correct register set for Broxton needs to be
5463 * identified using the VBT; hardcoded for now.
5465 pp_ctrl_reg = BXT_PP_CONTROL(0);
5466 pp_on_reg = BXT_PP_ON_DELAYS(0);
5467 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5468 } else if (HAS_PCH_SPLIT(dev)) {
5469 pp_ctrl_reg = PCH_PP_CONTROL;
5470 pp_on_reg = PCH_PP_ON_DELAYS;
5471 pp_off_reg = PCH_PP_OFF_DELAYS;
5472 pp_div_reg = PCH_PP_DIVISOR;
5473 } else {
5474 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5476 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5477 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5478 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5479 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5482 /* Workaround: Need to write PP_CONTROL with the unlock key as
5483 * the very first thing. */
5484 pp_ctl = ironlake_get_pp_control(intel_dp);
5486 pp_on = I915_READ(pp_on_reg);
5487 pp_off = I915_READ(pp_off_reg);
5488 if (!IS_BROXTON(dev)) {
5489 I915_WRITE(pp_ctrl_reg, pp_ctl);
5490 pp_div = I915_READ(pp_div_reg);
5493 /* Pull timing values out of registers */
5494 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5495 PANEL_POWER_UP_DELAY_SHIFT;
5497 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5498 PANEL_LIGHT_ON_DELAY_SHIFT;
5500 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5501 PANEL_LIGHT_OFF_DELAY_SHIFT;
5503 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5504 PANEL_POWER_DOWN_DELAY_SHIFT;
5506 if (IS_BROXTON(dev)) {
5507 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5508 BXT_POWER_CYCLE_DELAY_SHIFT;
5509 if (tmp > 0)
5510 cur.t11_t12 = (tmp - 1) * 1000;
5511 else
5512 cur.t11_t12 = 0;
5513 } else {
5514 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5515 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5518 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5519 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5521 vbt = dev_priv->vbt.edp_pps;
5523 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5524 * our hw here, which are all in 100usec. */
5525 spec.t1_t3 = 210 * 10;
5526 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5527 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5528 spec.t10 = 500 * 10;
5529 /* This one is special and actually in units of 100ms, but zero
5530 * based in the hw (so we need to add 100 ms). But the sw vbt
5531 * table multiplies it by 1000 to make it in units of 100usec,
5532 * too. */
5533 spec.t11_t12 = (510 + 100) * 10;
5535 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5536 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5538 /* Use the max of the register settings and vbt. If both are
5539 * unset, fall back to the spec limits. */
5540 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5541 spec.field : \
5542 max(cur.field, vbt.field))
5543 assign_final(t1_t3);
5544 assign_final(t8);
5545 assign_final(t9);
5546 assign_final(t10);
5547 assign_final(t11_t12);
5548 #undef assign_final
5550 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5551 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5552 intel_dp->backlight_on_delay = get_delay(t8);
5553 intel_dp->backlight_off_delay = get_delay(t9);
5554 intel_dp->panel_power_down_delay = get_delay(t10);
5555 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5556 #undef get_delay
5558 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5559 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5560 intel_dp->panel_power_cycle_delay);
5562 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5563 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
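/*
 * Editor's note: the PPS fields stay in the hardware's 100 us units
 * until the very end; get_delay() then rounds up to milliseconds. A
 * worked example with the eDP-spec T1+T3 ceiling used above:
 *
 *	spec.t1_t3 = 210 * 10;		// 2100 * 100 us = 210 ms
 *	DIV_ROUND_UP(2100, 10);		// -> 210 ms power-up delay
 */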
5566 static void
5567 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5568 struct intel_dp *intel_dp)
5570 struct drm_i915_private *dev_priv = dev->dev_private;
5571 u32 pp_on, pp_off, pp_div, port_sel = 0;
5572 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5573 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5574 enum port port = dp_to_dig_port(intel_dp)->port;
5575 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5577 lockdep_assert_held(&dev_priv->pps_mutex);
5579 if (IS_BROXTON(dev)) {
5581 * TODO: BXT has 2 sets of PPS registers.
5582 * The correct register set for Broxton needs to be
5583 * identified using the VBT; hardcoded for now.
5585 pp_ctrl_reg = BXT_PP_CONTROL(0);
5586 pp_on_reg = BXT_PP_ON_DELAYS(0);
5587 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5589 } else if (HAS_PCH_SPLIT(dev)) {
5590 pp_on_reg = PCH_PP_ON_DELAYS;
5591 pp_off_reg = PCH_PP_OFF_DELAYS;
5592 pp_div_reg = PCH_PP_DIVISOR;
5593 } else {
5594 enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5596 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5597 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5598 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5602 * And finally store the new values in the power sequencer. The
5603 * backlight delays are set to 1 because we do manual waits on them. For
5604 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5605 * we'll end up waiting for the backlight off delay twice: once when we
5606 * do the manual sleep, and once when we disable the panel and wait for
5607 * the PP_STATUS bit to become zero.
5609 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5610 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5611 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5612 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5613 /* Compute the divisor for the pp clock, simply match the Bspec
5614 * formula. */
5615 if (IS_BROXTON(dev)) {
5616 pp_div = I915_READ(pp_ctrl_reg);
5617 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5618 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5619 << BXT_POWER_CYCLE_DELAY_SHIFT);
5620 } else {
5621 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5622 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5623 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5626 /* Haswell doesn't have any port selection bits for the panel
5627 * power sequencer any more. */
5628 if (IS_VALLEYVIEW(dev)) {
5629 port_sel = PANEL_PORT_SELECT_VLV(port);
5630 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5631 if (port == PORT_A)
5632 port_sel = PANEL_PORT_SELECT_DPA;
5633 else
5634 port_sel = PANEL_PORT_SELECT_DPD;
5637 pp_on |= port_sel;
5639 I915_WRITE(pp_on_reg, pp_on);
5640 I915_WRITE(pp_off_reg, pp_off);
5641 if (IS_BROXTON(dev))
5642 I915_WRITE(pp_ctrl_reg, pp_div);
5643 else
5644 I915_WRITE(pp_div_reg, pp_div);
5646 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5647 I915_READ(pp_on_reg),
5648 I915_READ(pp_off_reg),
5649 IS_BROXTON(dev) ?
5650 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5651 I915_READ(pp_div_reg));
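/*
 * Editor's note: on non-BXT hardware the power-cycle field counts 100 ms
 * steps, so the 100 us based t11_t12 is divided by 1000, rounding up.
 * With the eDP-spec value derived earlier:
 *
 *	seq->t11_t12 = 6100;		// 610 ms in 100 us units
 *	DIV_ROUND_UP(6100, 1000);	// -> 7, i.e. 700 ms in hardware
 */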
5655 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5656 * @dev: DRM device
5657 * @refresh_rate: RR to be programmed
5659 * This function gets called when refresh rate (RR) has to be changed from
5660 * one frequency to another. Switches can be between the high and low RR
5661 * supported by the panel, or to any other RR based on media playback (in
5662 * which case the RR value needs to be passed from user space).
5664 * The caller of this function needs to take a lock on dev_priv->drrs.
5666 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5668 struct drm_i915_private *dev_priv = dev->dev_private;
5669 struct intel_encoder *encoder;
5670 struct intel_digital_port *dig_port = NULL;
5671 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5672 struct intel_crtc_state *config = NULL;
5673 struct intel_crtc *intel_crtc = NULL;
5674 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5676 if (refresh_rate <= 0) {
5677 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5678 return;
5681 if (intel_dp == NULL) {
5682 DRM_DEBUG_KMS("DRRS not supported.\n");
5683 return;
5687 * FIXME: This needs proper synchronization with psr state for some
5688 * platforms that cannot have PSR and DRRS enabled at the same time.
5691 dig_port = dp_to_dig_port(intel_dp);
5692 encoder = &dig_port->base;
5693 intel_crtc = to_intel_crtc(encoder->base.crtc);
5695 if (!intel_crtc) {
5696 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5697 return;
5700 config = intel_crtc->config;
5702 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5703 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5704 return;
5707 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5708 refresh_rate)
5709 index = DRRS_LOW_RR;
5711 if (index == dev_priv->drrs.refresh_rate_type) {
5712 DRM_DEBUG_KMS(
5713 "DRRS requested for previously set RR...ignoring\n");
5714 return;
5717 if (!intel_crtc->active) {
5718 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5719 return;
5722 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5723 switch (index) {
5724 case DRRS_HIGH_RR:
5725 intel_dp_set_m_n(intel_crtc, M1_N1);
5726 break;
5727 case DRRS_LOW_RR:
5728 intel_dp_set_m_n(intel_crtc, M2_N2);
5729 break;
5730 case DRRS_MAX_RR:
5731 default:
5732 DRM_ERROR("Unsupported refreshrate type\n");
5734 } else if (INTEL_INFO(dev)->gen > 6) {
5735 u32 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5736 u32 val;
5738 val = I915_READ(reg);
5739 if (index > DRRS_HIGH_RR) {
5740 if (IS_VALLEYVIEW(dev))
5741 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5742 else
5743 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5744 } else {
5745 if (IS_VALLEYVIEW(dev))
5746 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5747 else
5748 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5749 }
5750 I915_WRITE(reg, val);
5751 }
5753 dev_priv->drrs.refresh_rate_type = index;
5755 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5756 }
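/*
 * Example call site (editor's sketch, not part of the driver): the
 * pattern below mirrors intel_edp_drrs_downclock_work() further down
 * and shows the dev_priv->drrs.mutex locking that the kernel-doc above
 * requires before requesting a switch to the panel's downclocked rate.
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	if (dev_priv->drrs.dp)
 *		intel_dp_set_drrs_state(dev_priv->dev,
 *				dev_priv->drrs.dp->attached_connector->
 *				panel.downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */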
5758 /**
5759 * intel_edp_drrs_enable - init drrs struct if supported
5760 * @intel_dp: DP struct
5762 * Initializes frontbuffer_bits and drrs.dp
5763 */
5764 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5765 {
5766 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5767 struct drm_i915_private *dev_priv = dev->dev_private;
5768 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5769 struct drm_crtc *crtc = dig_port->base.base.crtc;
5770 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5772 if (!intel_crtc->config->has_drrs) {
5773 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5774 return;
5775 }
5777 mutex_lock(&dev_priv->drrs.mutex);
5778 if (WARN_ON(dev_priv->drrs.dp)) {
5779 DRM_ERROR("DRRS already enabled\n");
5780 goto unlock;
5781 }
5783 dev_priv->drrs.busy_frontbuffer_bits = 0;
5785 dev_priv->drrs.dp = intel_dp;
5787 unlock:
5788 mutex_unlock(&dev_priv->drrs.mutex);
5789 }
5791 /**
5792 * intel_edp_drrs_disable - Disable DRRS
5793 * @intel_dp: DP struct
5795 */
5796 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5797 {
5798 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5799 struct drm_i915_private *dev_priv = dev->dev_private;
5800 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5801 struct drm_crtc *crtc = dig_port->base.base.crtc;
5802 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5804 if (!intel_crtc->config->has_drrs)
5805 return;
5807 mutex_lock(&dev_priv->drrs.mutex);
5808 if (!dev_priv->drrs.dp) {
5809 mutex_unlock(&dev_priv->drrs.mutex);
5810 return;
5811 }
5813 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5814 intel_dp_set_drrs_state(dev_priv->dev,
5815 intel_dp->attached_connector->panel.
5816 fixed_mode->vrefresh);
5818 dev_priv->drrs.dp = NULL;
5819 mutex_unlock(&dev_priv->drrs.mutex);
5821 cancel_delayed_work_sync(&dev_priv->drrs.work);
5822 }
5824 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5825 {
5826 struct drm_i915_private *dev_priv =
5827 container_of(work, typeof(*dev_priv), drrs.work.work);
5828 struct intel_dp *intel_dp;
5830 mutex_lock(&dev_priv->drrs.mutex);
5832 intel_dp = dev_priv->drrs.dp;
5834 if (!intel_dp)
5835 goto unlock;
5837 /*
5838 * The delayed work can race with an invalidate, hence we need to
5839 * recheck.
5840 */
5842 if (dev_priv->drrs.busy_frontbuffer_bits)
5843 goto unlock;
5845 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5846 intel_dp_set_drrs_state(dev_priv->dev,
5847 intel_dp->attached_connector->panel.
5848 downclock_mode->vrefresh);
5850 unlock:
5851 mutex_unlock(&dev_priv->drrs.mutex);
5852 }
5854 /**
5855 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5856 * @dev: DRM device
5857 * @frontbuffer_bits: frontbuffer plane tracking bits
5859 * This function gets called every time rendering on the given planes starts.
5860 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5862 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5863 */
5864 void intel_edp_drrs_invalidate(struct drm_device *dev,
5865 unsigned frontbuffer_bits)
5866 {
5867 struct drm_i915_private *dev_priv = dev->dev_private;
5868 struct drm_crtc *crtc;
5869 enum i915_pipe pipe;
5871 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5872 return;
5874 cancel_delayed_work(&dev_priv->drrs.work);
5876 mutex_lock(&dev_priv->drrs.mutex);
5877 if (!dev_priv->drrs.dp) {
5878 mutex_unlock(&dev_priv->drrs.mutex);
5879 return;
5880 }
5882 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5883 pipe = to_intel_crtc(crtc)->pipe;
5885 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5886 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5888 /* invalidate means busy screen hence upclock */
5889 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5890 intel_dp_set_drrs_state(dev_priv->dev,
5891 dev_priv->drrs.dp->attached_connector->panel.
5892 fixed_mode->vrefresh);
5894 mutex_unlock(&dev_priv->drrs.mutex);
5895 }
5897 /**
5898 * intel_edp_drrs_flush - Restart Idleness DRRS
5899 * @dev: DRM device
5900 * @frontbuffer_bits: frontbuffer plane tracking bits
5902 * This function gets called every time rendering on the given planes has
5903 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5904 * (LOW_RR -> HIGH_RR). Also, idleness detection should be restarted,
5905 * if no other planes are dirty.
5907 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5908 */
5909 void intel_edp_drrs_flush(struct drm_device *dev,
5910 unsigned frontbuffer_bits)
5911 {
5912 struct drm_i915_private *dev_priv = dev->dev_private;
5913 struct drm_crtc *crtc;
5914 enum i915_pipe pipe;
5916 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5917 return;
5919 cancel_delayed_work(&dev_priv->drrs.work);
5921 mutex_lock(&dev_priv->drrs.mutex);
5922 if (!dev_priv->drrs.dp) {
5923 mutex_unlock(&dev_priv->drrs.mutex);
5924 return;
5927 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5928 pipe = to_intel_crtc(crtc)->pipe;
5930 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5931 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5933 /* flush means busy screen hence upclock */
5934 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5935 intel_dp_set_drrs_state(dev_priv->dev,
5936 dev_priv->drrs.dp->attached_connector->panel.
5937 fixed_mode->vrefresh);
5939 /*
5940 * flush also means no more activity, hence schedule downclock if all
5941 * other fbs are quiescent too
5942 */
5943 if (!dev_priv->drrs.busy_frontbuffer_bits)
5944 schedule_delayed_work(&dev_priv->drrs.work,
5945 msecs_to_jiffies(1000));
5946 mutex_unlock(&dev_priv->drrs.mutex);
5947 }
5949 /**
5950 * DOC: Display Refresh Rate Switching (DRRS)
5952 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5953 * which enables switching between low and high refresh rates dynamically,
5954 * based on the usage scenario. This feature is applicable for internal
5955 * panels.
5957 * Indication that the panel supports DRRS is given by the panel EDID, which
5958 * would list multiple refresh rates for one resolution.
5960 * DRRS is of two types: static and seamless.
5961 * Static DRRS involves changing the refresh rate (RR) by doing a full modeset
5962 * (may appear as a blink on screen) and is used in dock-undock scenarios.
5963 * Seamless DRRS involves changing the RR without any visual effect to the
5964 * user and can be used during normal system usage. This is done by
5965 * programming certain registers.
5967 * Support for static/seamless DRRS may be indicated in the VBT based on
5968 * inputs from the panel spec.
5970 * DRRS saves power by switching to a low RR based on usage scenarios.
5972 * eDP DRRS:
5973 * The implementation is based on frontbuffer tracking.
5974 * When there is a disturbance on the screen triggered by user activity or a
5975 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5976 * When there is no movement on screen, after a timeout of 1 second, a switch
5977 * to low RR is made.
5978 * For integration with the frontbuffer tracking code,
5979 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5981 * DRRS can be further extended to support other internal panels, and also
5982 * the scenario of video playback wherein the RR is set based on the rate
5983 * requested by userspace.
5984 */
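/*
 * Example integration (editor's sketch): a frontbuffer tracking caller
 * would bracket CPU rendering with the two entry points described
 * above. The helper below is hypothetical; only
 * intel_edp_drrs_invalidate(), intel_edp_drrs_flush() and
 * INTEL_FRONTBUFFER_ALL_MASK() are real symbols in this driver.
 *
 *	static void example_frontbuffer_write(struct drm_device *dev,
 *					      enum i915_pipe pipe)
 *	{
 *		unsigned bits = INTEL_FRONTBUFFER_ALL_MASK(pipe);
 *
 *		intel_edp_drrs_invalidate(dev, bits);	// upclock to high RR
 *		// ... CPU rendering into the frontbuffer ...
 *		intel_edp_drrs_flush(dev, bits);	// re-arm 1s idle downclock
 *	}
 */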
5986 /**
5987 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5988 * @intel_connector: eDP connector
5989 * @fixed_mode: preferred mode of panel
5991 * This function is called only once at driver load to initialize basic
5992 * DRRS state.
5994 * Returns:
5995 * Downclock mode if the panel supports it, else NULL.
5996 * DRRS support is determined by the presence of a downclock mode (apart
5997 * from the VBT setting).
5998 */
5999 static struct drm_display_mode *
6000 intel_dp_drrs_init(struct intel_connector *intel_connector,
6001 struct drm_display_mode *fixed_mode)
6002 {
6003 struct drm_connector *connector = &intel_connector->base;
6004 struct drm_device *dev = connector->dev;
6005 struct drm_i915_private *dev_priv = dev->dev_private;
6006 struct drm_display_mode *downclock_mode = NULL;
6008 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
6009 lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE);
6011 if (INTEL_INFO(dev)->gen <= 6) {
6012 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
6013 return NULL;
6014 }
6016 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
6017 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
6018 return NULL;
6019 }
6021 downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
6022 connector);
6024 if (!downclock_mode) {
6025 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
6026 return NULL;
6027 }
6029 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
6031 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
6032 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
6033 return downclock_mode;
6034 }
6036 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
6037 struct intel_connector *intel_connector)
6038 {
6039 struct drm_connector *connector = &intel_connector->base;
6040 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
6041 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6042 struct drm_device *dev = intel_encoder->base.dev;
6043 struct drm_i915_private *dev_priv = dev->dev_private;
6044 struct drm_display_mode *fixed_mode = NULL;
6045 struct drm_display_mode *downclock_mode = NULL;
6046 bool has_dpcd;
6047 struct drm_display_mode *scan;
6048 struct edid *edid;
6049 enum i915_pipe pipe = INVALID_PIPE;
6051 if (!is_edp(intel_dp))
6052 return true;
6054 pps_lock(intel_dp);
6055 intel_edp_panel_vdd_sanitize(intel_dp);
6056 pps_unlock(intel_dp);
6058 /* Cache DPCD and EDID for edp. */
6059 has_dpcd = intel_dp_get_dpcd(intel_dp);
6061 if (has_dpcd) {
6062 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
6063 dev_priv->no_aux_handshake =
6064 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
6065 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
6066 } else {
6067 /* if this fails, presume the device is a ghost */
6068 DRM_INFO("failed to retrieve link info, disabling eDP\n");
6069 return false;
6070 }
6072 /* We now know it's not a ghost, init power sequence regs. */
6073 pps_lock(intel_dp);
6074 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
6075 pps_unlock(intel_dp);
6077 mutex_lock(&dev->mode_config.mutex);
6078 edid = drm_get_edid(connector, intel_dp->aux.ddc);
6079 if (edid) {
6080 if (drm_add_edid_modes(connector, edid)) {
6081 drm_mode_connector_update_edid_property(connector,
6082 edid);
6083 drm_edid_to_eld(connector, edid);
6084 } else {
6085 kfree(edid);
6086 edid = ERR_PTR(-EINVAL);
6087 }
6088 } else {
6089 edid = ERR_PTR(-ENOENT);
6090 }
6091 intel_connector->edid = edid;
6093 /* prefer fixed mode from EDID if available */
6094 list_for_each_entry(scan, &connector->probed_modes, head) {
6095 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
6096 fixed_mode = drm_mode_duplicate(dev, scan);
6097 downclock_mode = intel_dp_drrs_init(
6098 intel_connector, fixed_mode);
6099 break;
6100 }
6101 }
6103 /* fallback to VBT if available for eDP */
6104 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
6105 fixed_mode = drm_mode_duplicate(dev,
6106 dev_priv->vbt.lfp_lvds_vbt_mode);
6107 if (fixed_mode)
6108 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
6109 }
6110 mutex_unlock(&dev->mode_config.mutex);
6112 if (IS_VALLEYVIEW(dev)) {
6113 #if 0
6114 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
6115 register_reboot_notifier(&intel_dp->edp_notifier);
6116 #endif
6118 /*
6119 * Figure out the current pipe for the initial backlight setup.
6120 * If the current pipe isn't valid, try the PPS pipe, and if that
6121 * fails just assume pipe A.
6122 */
6123 if (IS_CHERRYVIEW(dev))
6124 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
6125 else
6126 pipe = PORT_TO_PIPE(intel_dp->DP);
6128 if (pipe != PIPE_A && pipe != PIPE_B)
6129 pipe = intel_dp->pps_pipe;
6131 if (pipe != PIPE_A && pipe != PIPE_B)
6132 pipe = PIPE_A;
6134 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
6135 pipe_name(pipe));
6136 }
6138 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
6139 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6140 intel_panel_setup_backlight(connector, pipe);
6142 return true;
6143 }
6145 bool
6146 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
6147 struct intel_connector *intel_connector)
6148 {
6149 struct drm_connector *connector = &intel_connector->base;
6150 struct intel_dp *intel_dp = &intel_dig_port->dp;
6151 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6152 struct drm_device *dev = intel_encoder->base.dev;
6153 struct drm_i915_private *dev_priv = dev->dev_private;
6154 enum port port = intel_dig_port->port;
6155 int type;
6157 intel_dp->pps_pipe = INVALID_PIPE;
6159 /* intel_dp vfuncs */
6160 if (INTEL_INFO(dev)->gen >= 9)
6161 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6162 else if (IS_VALLEYVIEW(dev))
6163 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6164 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6165 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6166 else if (HAS_PCH_SPLIT(dev))
6167 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6168 else
6169 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6171 if (INTEL_INFO(dev)->gen >= 9)
6172 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6173 else
6174 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
6176 /* Preserve the current hw state. */
6177 intel_dp->DP = I915_READ(intel_dp->output_reg);
6178 intel_dp->attached_connector = intel_connector;
6180 if (intel_dp_is_edp(dev, port))
6181 type = DRM_MODE_CONNECTOR_eDP;
6182 else
6183 type = DRM_MODE_CONNECTOR_DisplayPort;
6185 /*
6186 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6187 * for DP the encoder type can be set by the caller to
6188 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6189 */
6190 if (type == DRM_MODE_CONNECTOR_eDP)
6191 intel_encoder->type = INTEL_OUTPUT_EDP;
6193 /* eDP only on port B and/or C on vlv/chv */
6194 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6195 port != PORT_B && port != PORT_C))
6196 return false;
6198 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6199 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6200 port_name(port));
6202 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6203 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6205 connector->interlace_allowed = true;
6206 connector->doublescan_allowed = 0;
6208 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6209 edp_panel_vdd_work);
6211 intel_connector_attach_encoder(intel_connector, intel_encoder);
6212 drm_connector_register(connector);
6214 if (HAS_DDI(dev))
6215 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6216 else
6217 intel_connector->get_hw_state = intel_connector_get_hw_state;
6218 intel_connector->unregister = intel_dp_connector_unregister;
6220 /* Set up the hotplug pin. */
6221 switch (port) {
6222 case PORT_A:
6223 intel_encoder->hpd_pin = HPD_PORT_A;
6224 break;
6225 case PORT_B:
6226 intel_encoder->hpd_pin = HPD_PORT_B;
6227 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6228 intel_encoder->hpd_pin = HPD_PORT_A;
6229 break;
6230 case PORT_C:
6231 intel_encoder->hpd_pin = HPD_PORT_C;
6232 break;
6233 case PORT_D:
6234 intel_encoder->hpd_pin = HPD_PORT_D;
6235 break;
6236 case PORT_E:
6237 intel_encoder->hpd_pin = HPD_PORT_E;
6238 break;
6239 default:
6240 BUG();
6241 }
6243 if (is_edp(intel_dp)) {
6244 pps_lock(intel_dp);
6245 intel_dp_init_panel_power_timestamps(intel_dp);
6246 if (IS_VALLEYVIEW(dev))
6247 vlv_initial_power_sequencer_setup(intel_dp);
6248 else
6249 intel_dp_init_panel_power_sequencer(dev, intel_dp);
6250 pps_unlock(intel_dp);
6253 intel_dp_aux_init(intel_dp, intel_connector);
6255 /* init MST on ports that can support it */
6256 if (HAS_DP_MST(dev) &&
6257 (port == PORT_B || port == PORT_C || port == PORT_D))
6258 intel_dp_mst_encoder_init(intel_dig_port,
6259 intel_connector->base.base.id);
6261 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6262 drm_dp_aux_unregister(&intel_dp->aux);
6263 if (is_edp(intel_dp)) {
6264 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6265 /*
6266 * vdd might still be enabled due to the delayed vdd off.
6267 * Make sure vdd is actually turned off here.
6268 */
6269 pps_lock(intel_dp);
6270 edp_panel_vdd_off_sync(intel_dp);
6271 pps_unlock(intel_dp);
6272 }
6273 drm_connector_unregister(connector);
6274 drm_connector_cleanup(connector);
6275 return false;
6276 }
6278 intel_dp_add_properties(intel_dp, connector);
6280 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6281 * 0xd. Failure to do so will result in spurious interrupts being
6282 * generated on the port when a cable is not attached.
6283 */
6284 if (IS_G4X(dev) && !IS_GM45(dev)) {
6285 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6286 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6287 }
6289 i915_debugfs_connector_add(connector);
6291 return true;
6292 }
6294 void
6295 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6296 {
6297 struct drm_i915_private *dev_priv = dev->dev_private;
6298 struct intel_digital_port *intel_dig_port;
6299 struct intel_encoder *intel_encoder;
6300 struct drm_encoder *encoder;
6301 struct intel_connector *intel_connector;
6303 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6304 if (!intel_dig_port)
6305 return;
6307 intel_connector = intel_connector_alloc();
6308 if (!intel_connector)
6309 goto err_connector_alloc;
6311 intel_encoder = &intel_dig_port->base;
6312 encoder = &intel_encoder->base;
6314 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6315 DRM_MODE_ENCODER_TMDS);
6317 intel_encoder->compute_config = intel_dp_compute_config;
6318 intel_encoder->disable = intel_disable_dp;
6319 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6320 intel_encoder->get_config = intel_dp_get_config;
6321 intel_encoder->suspend = intel_dp_encoder_suspend;
6322 if (IS_CHERRYVIEW(dev)) {
6323 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6324 intel_encoder->pre_enable = chv_pre_enable_dp;
6325 intel_encoder->enable = vlv_enable_dp;
6326 intel_encoder->post_disable = chv_post_disable_dp;
6327 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6328 } else if (IS_VALLEYVIEW(dev)) {
6329 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6330 intel_encoder->pre_enable = vlv_pre_enable_dp;
6331 intel_encoder->enable = vlv_enable_dp;
6332 intel_encoder->post_disable = vlv_post_disable_dp;
6333 } else {
6334 intel_encoder->pre_enable = g4x_pre_enable_dp;
6335 intel_encoder->enable = g4x_enable_dp;
6336 if (INTEL_INFO(dev)->gen >= 5)
6337 intel_encoder->post_disable = ilk_post_disable_dp;
6338 }
6340 intel_dig_port->port = port;
6341 intel_dig_port->dp.output_reg = output_reg;
6343 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6344 if (IS_CHERRYVIEW(dev)) {
6345 if (port == PORT_D)
6346 intel_encoder->crtc_mask = 1 << 2;
6347 else
6348 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6349 } else {
6350 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6351 }
6352 intel_encoder->cloneable = 0;
6354 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6355 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6357 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6358 goto err_init_connector;
6360 return;
6362 err_init_connector:
6363 drm_encoder_cleanup(encoder);
6364 kfree(intel_connector);
6365 err_connector_alloc:
6366 kfree(intel_dig_port);
6368 return;
6369 }
6371 #if 0
6372 void intel_dp_mst_suspend(struct drm_device *dev)
6373 {
6374 struct drm_i915_private *dev_priv = dev->dev_private;
6375 int i;
6377 /* disable MST */
6378 for (i = 0; i < I915_MAX_PORTS; i++) {
6379 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6380 if (!intel_dig_port)
6381 continue;
6383 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6384 if (!intel_dig_port->dp.can_mst)
6385 continue;
6386 if (intel_dig_port->dp.is_mst)
6387 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6388 }
6389 }
6390 }
6391 #endif
6393 void intel_dp_mst_resume(struct drm_device *dev)
6394 {
6395 struct drm_i915_private *dev_priv = dev->dev_private;
6396 int i;
6398 for (i = 0; i < I915_MAX_PORTS; i++) {
6399 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6400 if (!intel_dig_port)
6401 continue;
6402 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6403 #if 0
6404 int ret;
6406 if (!intel_dig_port->dp.can_mst)
6407 continue;
6409 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6410 if (ret != 0) {
6411 intel_dp_check_mst_status(&intel_dig_port->dp);
6412 }
6413 #endif