drm/i915: Update to Linux 4.7.10
[dragonfly.git] sys/dev/drm/i915/i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
29 #include <linux/circ_buf.h>
30 #include <drm/drmP.h>
31 #include <drm/i915_drm.h>
32 #include "i915_drv.h"
33 #include "i915_trace.h"
34 #include "intel_drv.h"
36 /**
37 * DOC: interrupt handling
39 * These functions provide the basic support for enabling and disabling
40 * interrupt handling. There's a lot more functionality in i915_irq.c
41 * and related files, but that will be described in separate chapters.
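/*
 * Editorial orientation for readers of this file (summarizing the usual
 * i915 register convention rather than anything specific to one function):
 * IER enables an interrupt source, IMR masks it from asserting the CPU
 * interrupt, and IIR latches events that have fired and is cleared by
 * writing 1s back. The GEN5/GEN8 reset and init macros below operate on
 * exactly these three registers.
 */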
44 static const u32 hpd_ilk[HPD_NUM_PINS] = {
45 [HPD_PORT_A] = DE_DP_A_HOTPLUG,
48 static const u32 hpd_ivb[HPD_NUM_PINS] = {
49 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
52 static const u32 hpd_bdw[HPD_NUM_PINS] = {
53 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
56 static const u32 hpd_ibx[HPD_NUM_PINS] = {
57 [HPD_CRT] = SDE_CRT_HOTPLUG,
58 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
59 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
60 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
61 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
64 static const u32 hpd_cpt[HPD_NUM_PINS] = {
65 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
66 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
67 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
68 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
69 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
72 static const u32 hpd_spt[HPD_NUM_PINS] = {
73 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
74 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
75 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
76 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
77 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
80 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
81 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
82 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
83 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
84 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
85 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
86 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
89 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
90 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
91 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
92 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
93 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
94 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
95 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
98 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
99 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
100 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
101 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
102 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
103 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
104 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
107 /* BXT hpd list */
108 static const u32 hpd_bxt[HPD_NUM_PINS] = {
109 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
110 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
111 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
114 /* IIR can theoretically queue up two events. Be paranoid. */
115 #define GEN8_IRQ_RESET_NDX(type, which) do { \
116 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
117 POSTING_READ(GEN8_##type##_IMR(which)); \
118 I915_WRITE(GEN8_##type##_IER(which), 0); \
119 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
120 POSTING_READ(GEN8_##type##_IIR(which)); \
121 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
122 POSTING_READ(GEN8_##type##_IIR(which)); \
123 } while (0)
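/*
 * Illustration (editorial, not part of the driver): because IIR can latch a
 * second event behind the one being cleared, the reset macros clear IIR
 * twice. GEN8_IRQ_RESET_NDX(GT, 0), for example, masks everything in
 * GEN8_GT_IMR(0), zeroes GEN8_GT_IER(0), then writes 0xffffffff to
 * GEN8_GT_IIR(0) two times, with posting reads after the IMR and IIR writes
 * to flush them.
 */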
125 #define GEN5_IRQ_RESET(type) do { \
126 I915_WRITE(type##IMR, 0xffffffff); \
127 POSTING_READ(type##IMR); \
128 I915_WRITE(type##IER, 0); \
129 I915_WRITE(type##IIR, 0xffffffff); \
130 POSTING_READ(type##IIR); \
131 I915_WRITE(type##IIR, 0xffffffff); \
132 POSTING_READ(type##IIR); \
133 } while (0)
136 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
138 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
139 i915_reg_t reg)
141 u32 val = I915_READ(reg);
143 if (val == 0)
144 return;
146 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
147 i915_mmio_reg_offset(reg), val);
148 I915_WRITE(reg, 0xffffffff);
149 POSTING_READ(reg);
150 I915_WRITE(reg, 0xffffffff);
151 POSTING_READ(reg);
154 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
155 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
156 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
157 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
158 POSTING_READ(GEN8_##type##_IMR(which)); \
159 } while (0)
161 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
162 gen5_assert_iir_is_zero(dev_priv, type##IIR); \
163 I915_WRITE(type##IER, (ier_val)); \
164 I915_WRITE(type##IMR, (imr_val)); \
165 POSTING_READ(type##IMR); \
166 } while (0)
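/*
 * Editorial note on the init macros above: gen5_assert_iir_is_zero() first
 * warns about (and clears) any stale IIR bits, then IER is written before
 * IMR, and a final posting read of IMR flushes the sequence to the hardware.
 */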
168 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
170 /* For display hotplug interrupt */
171 static inline void
172 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
173 uint32_t mask,
174 uint32_t bits)
176 uint32_t val;
178 assert_spin_locked(&dev_priv->irq_lock);
179 WARN_ON(bits & ~mask);
181 val = I915_READ(PORT_HOTPLUG_EN);
182 val &= ~mask;
183 val |= bits;
184 I915_WRITE(PORT_HOTPLUG_EN, val);
188 * i915_hotplug_interrupt_update - update hotplug interrupt enable
189 * @dev_priv: driver private
190 * @mask: bits to update
191 * @bits: bits to enable
192 * NOTE: the HPD enable bits are modified both inside and outside
193 * of an interrupt context. To prevent read-modify-write cycles from
194 * interfering, these bits are protected by a spinlock. Since this
195 * function is usually not called from a context where the lock is
196 * held already, this function acquires the lock itself. A non-locking
197 * version is also available.
199 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
200 uint32_t mask,
201 uint32_t bits)
203 spin_lock_irq(&dev_priv->irq_lock);
204 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
205 spin_unlock_irq(&dev_priv->irq_lock);
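/*
 * Usage sketch (hypothetical caller, for illustration only): passing the same
 * bit as both @mask and @bits enables that hotplug interrupt, while passing
 * @bits = 0 with the same @mask disables it:
 *
 *   i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN,
 *                                 PORTB_HOTPLUG_INT_EN);
 *   i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN, 0);
 *
 * Bits outside @mask are left untouched in PORT_HOTPLUG_EN, and
 * WARN_ON(bits & ~mask) rejects requests that try to set bits not covered
 * by the mask.
 */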
209 * ilk_update_display_irq - update DEIMR
210 * @dev_priv: driver private
211 * @interrupt_mask: mask of interrupt bits to update
212 * @enabled_irq_mask: mask of interrupt bits to enable
214 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
215 uint32_t interrupt_mask,
216 uint32_t enabled_irq_mask)
218 uint32_t new_val;
220 assert_spin_locked(&dev_priv->irq_lock);
222 WARN_ON(enabled_irq_mask & ~interrupt_mask);
224 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
225 return;
227 new_val = dev_priv->irq_mask;
228 new_val &= ~interrupt_mask;
229 new_val |= (~enabled_irq_mask & interrupt_mask);
231 if (new_val != dev_priv->irq_mask) {
232 dev_priv->irq_mask = new_val;
233 I915_WRITE(DEIMR, dev_priv->irq_mask);
234 POSTING_READ(DEIMR);
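/*
 * Editorial note on the mask arithmetic used here and in the other *_update
 * helpers below: DEIMR (like GTIMR, GEN6_PMIMR, etc.) is a mask register, so
 * a set bit disables delivery. new_val &= ~interrupt_mask clears every bit
 * being updated, and new_val |= (~enabled_irq_mask & interrupt_mask) re-sets
 * (masks) the updated bits that were not requested enabled. The net effect:
 * bits in enabled_irq_mask end up unmasked, the rest of interrupt_mask ends
 * up masked, and everything else is untouched.
 */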
239 * ilk_update_gt_irq - update GTIMR
240 * @dev_priv: driver private
241 * @interrupt_mask: mask of interrupt bits to update
242 * @enabled_irq_mask: mask of interrupt bits to enable
244 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
245 uint32_t interrupt_mask,
246 uint32_t enabled_irq_mask)
248 assert_spin_locked(&dev_priv->irq_lock);
250 WARN_ON(enabled_irq_mask & ~interrupt_mask);
252 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
253 return;
255 dev_priv->gt_irq_mask &= ~interrupt_mask;
256 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
257 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
258 POSTING_READ(GTIMR);
261 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
263 ilk_update_gt_irq(dev_priv, mask, mask);
266 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
268 ilk_update_gt_irq(dev_priv, mask, 0);
271 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
273 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
276 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
278 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
281 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
283 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
287 * snb_update_pm_irq - update GEN6_PMIMR
288 * @dev_priv: driver private
289 * @interrupt_mask: mask of interrupt bits to update
290 * @enabled_irq_mask: mask of interrupt bits to enable
292 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
293 uint32_t interrupt_mask,
294 uint32_t enabled_irq_mask)
296 uint32_t new_val;
298 WARN_ON(enabled_irq_mask & ~interrupt_mask);
300 assert_spin_locked(&dev_priv->irq_lock);
302 new_val = dev_priv->pm_irq_mask;
303 new_val &= ~interrupt_mask;
304 new_val |= (~enabled_irq_mask & interrupt_mask);
306 if (new_val != dev_priv->pm_irq_mask) {
307 dev_priv->pm_irq_mask = new_val;
308 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
309 POSTING_READ(gen6_pm_imr(dev_priv));
313 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
315 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
316 return;
318 snb_update_pm_irq(dev_priv, mask, mask);
321 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
322 uint32_t mask)
324 snb_update_pm_irq(dev_priv, mask, 0);
327 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
329 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
330 return;
332 __gen6_disable_pm_irq(dev_priv, mask);
335 void gen6_reset_rps_interrupts(struct drm_device *dev)
337 struct drm_i915_private *dev_priv = dev->dev_private;
338 i915_reg_t reg = gen6_pm_iir(dev_priv);
340 spin_lock_irq(&dev_priv->irq_lock);
341 I915_WRITE(reg, dev_priv->pm_rps_events);
342 I915_WRITE(reg, dev_priv->pm_rps_events);
343 POSTING_READ(reg);
344 dev_priv->rps.pm_iir = 0;
345 spin_unlock_irq(&dev_priv->irq_lock);
348 void gen6_enable_rps_interrupts(struct drm_device *dev)
350 struct drm_i915_private *dev_priv = dev->dev_private;
352 spin_lock_irq(&dev_priv->irq_lock);
354 WARN_ON(dev_priv->rps.pm_iir);
355 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
356 dev_priv->rps.interrupts_enabled = true;
357 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
358 dev_priv->pm_rps_events);
359 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
361 spin_unlock_irq(&dev_priv->irq_lock);
364 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
367 * SNB and IVB can hard hang (and VLV/CHV may) on a looping batchbuffer
368 * if GEN6_PM_RP_UP_EI_EXPIRED is masked.
370 * TODO: verify if this can be reproduced on VLV,CHV.
372 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
373 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
375 if (INTEL_INFO(dev_priv)->gen >= 8)
376 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
378 return mask;
381 void gen6_disable_rps_interrupts(struct drm_device *dev)
383 struct drm_i915_private *dev_priv = dev->dev_private;
385 spin_lock_irq(&dev_priv->irq_lock);
386 dev_priv->rps.interrupts_enabled = false;
387 spin_unlock_irq(&dev_priv->irq_lock);
389 cancel_work_sync(&dev_priv->rps.work);
391 spin_lock_irq(&dev_priv->irq_lock);
393 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
395 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
396 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
397 ~dev_priv->pm_rps_events);
399 spin_unlock_irq(&dev_priv->irq_lock);
401 synchronize_irq(dev->irq);
405 * bdw_update_port_irq - update DE port interrupt
406 * @dev_priv: driver private
407 * @interrupt_mask: mask of interrupt bits to update
408 * @enabled_irq_mask: mask of interrupt bits to enable
410 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
411 uint32_t interrupt_mask,
412 uint32_t enabled_irq_mask)
414 uint32_t new_val;
415 uint32_t old_val;
417 assert_spin_locked(&dev_priv->irq_lock);
419 WARN_ON(enabled_irq_mask & ~interrupt_mask);
421 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
422 return;
424 old_val = I915_READ(GEN8_DE_PORT_IMR);
426 new_val = old_val;
427 new_val &= ~interrupt_mask;
428 new_val |= (~enabled_irq_mask & interrupt_mask);
430 if (new_val != old_val) {
431 I915_WRITE(GEN8_DE_PORT_IMR, new_val);
432 POSTING_READ(GEN8_DE_PORT_IMR);
437 * bdw_update_pipe_irq - update DE pipe interrupt
438 * @dev_priv: driver private
439 * @pipe: pipe whose interrupt to update
440 * @interrupt_mask: mask of interrupt bits to update
441 * @enabled_irq_mask: mask of interrupt bits to enable
443 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
444 enum i915_pipe pipe,
445 uint32_t interrupt_mask,
446 uint32_t enabled_irq_mask)
448 uint32_t new_val;
450 assert_spin_locked(&dev_priv->irq_lock);
452 WARN_ON(enabled_irq_mask & ~interrupt_mask);
454 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
455 return;
457 new_val = dev_priv->de_irq_mask[pipe];
458 new_val &= ~interrupt_mask;
459 new_val |= (~enabled_irq_mask & interrupt_mask);
461 if (new_val != dev_priv->de_irq_mask[pipe]) {
462 dev_priv->de_irq_mask[pipe] = new_val;
463 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
464 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
469 * ibx_display_interrupt_update - update SDEIMR
470 * @dev_priv: driver private
471 * @interrupt_mask: mask of interrupt bits to update
472 * @enabled_irq_mask: mask of interrupt bits to enable
474 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
475 uint32_t interrupt_mask,
476 uint32_t enabled_irq_mask)
478 uint32_t sdeimr = I915_READ(SDEIMR);
479 sdeimr &= ~interrupt_mask;
480 sdeimr |= (~enabled_irq_mask & interrupt_mask);
482 WARN_ON(enabled_irq_mask & ~interrupt_mask);
484 assert_spin_locked(&dev_priv->irq_lock);
486 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
487 return;
489 I915_WRITE(SDEIMR, sdeimr);
490 POSTING_READ(SDEIMR);
493 static void
494 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
495 u32 enable_mask, u32 status_mask)
497 i915_reg_t reg = PIPESTAT(pipe);
498 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
500 assert_spin_locked(&dev_priv->irq_lock);
501 WARN_ON(!intel_irqs_enabled(dev_priv));
503 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
504 status_mask & ~PIPESTAT_INT_STATUS_MASK,
505 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
506 pipe_name(pipe), enable_mask, status_mask))
507 return;
509 if ((pipestat & enable_mask) == enable_mask)
510 return;
512 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
514 /* Enable the interrupt, clear any pending status */
515 pipestat |= enable_mask | status_mask;
516 I915_WRITE(reg, pipestat);
517 POSTING_READ(reg);
520 static void
521 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
522 u32 enable_mask, u32 status_mask)
524 i915_reg_t reg = PIPESTAT(pipe);
525 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
527 assert_spin_locked(&dev_priv->irq_lock);
528 WARN_ON(!intel_irqs_enabled(dev_priv));
530 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
531 status_mask & ~PIPESTAT_INT_STATUS_MASK,
532 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
533 pipe_name(pipe), enable_mask, status_mask))
534 return;
536 if ((pipestat & enable_mask) == 0)
537 return;
539 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
541 pipestat &= ~enable_mask;
542 I915_WRITE(reg, pipestat);
543 POSTING_READ(reg);
546 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
548 u32 enable_mask = status_mask << 16;
551 * On pipe A we don't support the PSR interrupt yet,
552 * on pipe B and C the same bit MBZ.
554 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
555 return 0;
557 * On pipe B and C we don't support the PSR interrupt yet, on pipe
558 * A the same bit is for perf counters which we don't use either.
560 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
561 return 0;
563 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
564 SPRITE0_FLIP_DONE_INT_EN_VLV |
565 SPRITE1_FLIP_DONE_INT_EN_VLV);
566 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
567 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
568 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
569 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
571 return enable_mask;
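/*
 * Editorial note: in the PIPESTAT registers the status bits occupy the low
 * 16 bits and, as a rule, each one's enable bit sits 16 positions higher,
 * which is why status_mask << 16 is used above and in i915_enable_pipestat()
 * below. The exceptions handled explicitly here are the FIFO underrun status
 * (which has no corresponding enable bit, so it is stripped from the enable
 * mask) and the sprite flip-done events, whose enable bits are
 * SPRITE{0,1}_FLIP_DONE_INT_EN_VLV rather than status << 16.
 */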
574 void
575 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
576 u32 status_mask)
578 u32 enable_mask;
580 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
581 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
582 status_mask);
583 else
584 enable_mask = status_mask << 16;
585 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
588 void
589 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
590 u32 status_mask)
592 u32 enable_mask;
594 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
595 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
596 status_mask);
597 else
598 enable_mask = status_mask << 16;
599 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
603 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
604 * @dev: drm device
606 static void i915_enable_asle_pipestat(struct drm_device *dev)
608 struct drm_i915_private *dev_priv = dev->dev_private;
610 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
611 return;
613 spin_lock_irq(&dev_priv->irq_lock);
615 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
616 if (INTEL_INFO(dev)->gen >= 4)
617 i915_enable_pipestat(dev_priv, PIPE_A,
618 PIPE_LEGACY_BLC_EVENT_STATUS);
620 spin_unlock_irq(&dev_priv->irq_lock);
624 * This timing diagram depicts the video signal in and
625 * around the vertical blanking period.
627 * Assumptions about the fictitious mode used in this example:
628 * vblank_start >= 3
629 * vsync_start = vblank_start + 1
630 * vsync_end = vblank_start + 2
631 * vtotal = vblank_start + 3
633 * start of vblank:
634 * latch double buffered registers
635 * increment frame counter (ctg+)
636 * generate start of vblank interrupt (gen4+)
638 * | frame start:
639 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
640 * | may be shifted forward 1-3 extra lines via PIPECONF
641 * | |
642 * | | start of vsync:
643 * | | generate vsync interrupt
644 * | | |
645 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
646 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
647 * ----va---> <-----------------vb--------------------> <--------va-------------
648 * | | <----vs-----> |
649 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
650 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
651 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
652 * | | |
653 * last visible pixel first visible pixel
654 * | increment frame counter (gen3/4)
655 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
657 * x = horizontal active
658 * _ = horizontal blanking
659 * hs = horizontal sync
660 * va = vertical active
661 * vb = vertical blanking
662 * vs = vertical sync
663 * vbs = vblank_start (number)
665 * Summary:
666 * - most events happen at the start of horizontal sync
667 * - frame start happens at the start of horizontal blank, 1-4 lines
668 * (depending on PIPECONF settings) after the start of vblank
669 * - gen3/4 pixel and frame counter are synchronized with the start
670 * of horizontal active on the first line of vertical active
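/*
 * Editorial bridge to the code below: because the gen3/4 frame counter only
 * increments at the start of active video (see the summary above), a vblank
 * counter sampled between the start of vblank and the start of the next
 * active region would appear one frame behind. i915_get_vblank_counter()
 * compensates by comparing the pixel counter against the (hsync-adjusted)
 * vblank start and adding 1 once that point has been passed.
 */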
673 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
675 /* Gen2 doesn't have a hardware frame counter */
676 return 0;
679 /* Called from drm generic code, passed a 'crtc', which
680 * we use as a pipe index
682 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
684 struct drm_i915_private *dev_priv = dev->dev_private;
685 i915_reg_t high_frame, low_frame;
686 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
687 struct intel_crtc *intel_crtc =
688 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
689 const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
691 htotal = mode->crtc_htotal;
692 hsync_start = mode->crtc_hsync_start;
693 vbl_start = mode->crtc_vblank_start;
694 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
695 vbl_start = DIV_ROUND_UP(vbl_start, 2);
697 /* Convert to pixel count */
698 vbl_start *= htotal;
700 /* Start of vblank event occurs at start of hsync */
701 vbl_start -= htotal - hsync_start;
703 high_frame = PIPEFRAME(pipe);
704 low_frame = PIPEFRAMEPIXEL(pipe);
707 * High & low register fields aren't synchronized, so make sure
708 * we get a low value that's stable across two reads of the high
709 * register.
711 do {
712 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
713 low = I915_READ(low_frame);
714 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
715 } while (high1 != high2);
717 high1 >>= PIPE_FRAME_HIGH_SHIFT;
718 pixel = low & PIPE_PIXEL_MASK;
719 low >>= PIPE_FRAME_LOW_SHIFT;
722 * The frame counter increments at beginning of active.
723 * Cook up a vblank counter by also checking the pixel
724 * counter against vblank start.
726 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
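/*
 * Editorial note on the return expression above: the hardware splits the
 * frame count across two registers - the high-order bits read from PIPEFRAME
 * and 8 low-order bits (alongside the pixel counter) read from
 * PIPEFRAMEPIXEL - so (high1 << 8) | low reassembles a 24-bit counter, and
 * the final & 0xffffff keeps the +1 vblank correction from spilling past
 * 24 bits.
 */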
729 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
731 struct drm_i915_private *dev_priv = dev->dev_private;
733 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
736 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
737 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
739 struct drm_device *dev = crtc->base.dev;
740 struct drm_i915_private *dev_priv = dev->dev_private;
741 const struct drm_display_mode *mode = &crtc->base.hwmode;
742 enum i915_pipe pipe = crtc->pipe;
743 int position, vtotal;
745 vtotal = mode->crtc_vtotal;
746 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
747 vtotal /= 2;
749 if (IS_GEN2(dev))
750 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
751 else
752 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
755 * On HSW, the DSL reg (0x70000) appears to return 0 if we
756 * read it just before the start of vblank. So try it again
757 * so we don't accidentally end up spanning a vblank frame
758 * increment, causing the pipe_update_end() code to squawk at us.
760 * The nature of this problem means we can't simply check the ISR
761 * bit and return the vblank start value; nor can we use the scanline
762 * debug register in the transcoder as it appears to have the same
763 * problem. We may need to extend this to include other platforms,
764 * but so far testing only shows the problem on HSW.
766 if (HAS_DDI(dev) && !position) {
767 int i, temp;
769 for (i = 0; i < 100; i++) {
770 udelay(1);
771 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
772 DSL_LINEMASK_GEN3;
773 if (temp != position) {
774 position = temp;
775 break;
781 * See update_scanline_offset() for the details on the
782 * scanline_offset adjustment.
784 return (position + crtc->scanline_offset) % vtotal;
787 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
788 unsigned int flags, int *vpos, int *hpos,
789 ktime_t *stime, ktime_t *etime,
790 const struct drm_display_mode *mode)
792 struct drm_i915_private *dev_priv = dev->dev_private;
793 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
794 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
795 int position;
796 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
797 bool in_vbl = true;
798 int ret = 0;
799 unsigned long irqflags;
801 if (WARN_ON(!mode->crtc_clock)) {
802 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
803 "pipe %c\n", pipe_name(pipe));
804 return 0;
807 htotal = mode->crtc_htotal;
808 hsync_start = mode->crtc_hsync_start;
809 vtotal = mode->crtc_vtotal;
810 vbl_start = mode->crtc_vblank_start;
811 vbl_end = mode->crtc_vblank_end;
813 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
814 vbl_start = DIV_ROUND_UP(vbl_start, 2);
815 vbl_end /= 2;
816 vtotal /= 2;
819 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
822 * Lock uncore.lock, as we will do multiple timing critical raw
823 * register reads, potentially with preemption disabled, so the
824 * following code must not block on uncore.lock.
826 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
828 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
830 /* Get optional system timestamp before query. */
831 if (stime)
832 *stime = ktime_get();
834 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
835 /* No obvious pixelcount register. Only query vertical
836 * scanout position from Display scan line register.
838 position = __intel_get_crtc_scanline(intel_crtc);
839 } else {
840 /* Have access to pixelcount since start of frame.
841 * We can split this into vertical and horizontal
842 * scanout position.
844 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
846 /* convert to pixel counts */
847 vbl_start *= htotal;
848 vbl_end *= htotal;
849 vtotal *= htotal;
852 * In interlaced modes, the pixel counter counts all pixels,
853 * so one field will have htotal more pixels. In order to keep
854 * the reported position from jumping backwards when the pixel
855 * counter is beyond the length of the shorter field, just
856 * clamp the position to the length of the shorter field. This
857 * matches how the scanline counter based position works since
858 * the scanline counter doesn't count the two half lines.
860 if (position >= vtotal)
861 position = vtotal - 1;
864 * Start of vblank interrupt is triggered at start of hsync,
865 * just prior to the first active line of vblank. However we
866 * consider lines to start at the leading edge of horizontal
867 * active. So, should we get here before we've crossed into
868 * the horizontal active of the first line in vblank, we would
869 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
870 * always add htotal-hsync_start to the current pixel position.
872 position = (position + htotal - hsync_start) % vtotal;
875 /* Get optional system timestamp after query. */
876 if (etime)
877 *etime = ktime_get();
879 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
881 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
883 in_vbl = position >= vbl_start && position < vbl_end;
886 * While in vblank, position will be negative
887 * counting up towards 0 at vbl_end. And outside
888 * vblank, position will be positive counting
889 * up since vbl_end.
891 if (position >= vbl_start)
892 position -= vbl_end;
893 else
894 position += vtotal - vbl_end;
896 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
897 *vpos = position;
898 *hpos = 0;
899 } else {
900 *vpos = position / htotal;
901 *hpos = position - (*vpos * htotal);
904 /* In vblank? */
905 if (in_vbl)
906 ret |= DRM_SCANOUTPOS_IN_VBLANK;
908 return ret;
911 int intel_get_crtc_scanline(struct intel_crtc *crtc)
913 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
914 unsigned long irqflags;
915 int position;
917 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
918 position = __intel_get_crtc_scanline(crtc);
919 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
921 return position;
924 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
925 int *max_error,
926 struct timeval *vblank_time,
927 unsigned flags)
929 struct drm_crtc *crtc;
931 if (pipe >= INTEL_INFO(dev)->num_pipes) {
932 DRM_ERROR("Invalid crtc %u\n", pipe);
933 return -EINVAL;
936 /* Get drm_crtc to timestamp: */
937 crtc = intel_get_crtc_for_pipe(dev, pipe);
938 if (crtc == NULL) {
939 DRM_ERROR("Invalid crtc %u\n", pipe);
940 return -EINVAL;
943 if (!crtc->hwmode.crtc_clock) {
944 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
945 return -EBUSY;
948 /* Helper routine in DRM core does all the work: */
949 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
950 vblank_time, flags,
951 &crtc->hwmode);
954 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
956 struct drm_i915_private *dev_priv = dev->dev_private;
957 u32 busy_up, busy_down, max_avg, min_avg;
958 u8 new_delay;
960 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
962 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
964 new_delay = dev_priv->ips.cur_delay;
966 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
967 busy_up = I915_READ(RCPREVBSYTUPAVG);
968 busy_down = I915_READ(RCPREVBSYTDNAVG);
969 max_avg = I915_READ(RCBMAXAVG);
970 min_avg = I915_READ(RCBMINAVG);
972 /* Handle RCS change request from hw */
973 if (busy_up > max_avg) {
974 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
975 new_delay = dev_priv->ips.cur_delay - 1;
976 if (new_delay < dev_priv->ips.max_delay)
977 new_delay = dev_priv->ips.max_delay;
978 } else if (busy_down < min_avg) {
979 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
980 new_delay = dev_priv->ips.cur_delay + 1;
981 if (new_delay > dev_priv->ips.min_delay)
982 new_delay = dev_priv->ips.min_delay;
985 if (ironlake_set_drps(dev, new_delay))
986 dev_priv->ips.cur_delay = new_delay;
988 lockmgr(&mchdev_lock, LK_RELEASE);
990 return;
993 static void notify_ring(struct intel_engine_cs *engine)
995 if (!intel_engine_initialized(engine))
996 return;
998 trace_i915_gem_request_notify(engine);
999 engine->user_interrupts++;
1001 wake_up_all(&engine->irq_queue);
1004 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1005 struct intel_rps_ei *ei)
1007 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1008 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1009 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1012 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1013 const struct intel_rps_ei *old,
1014 const struct intel_rps_ei *now,
1015 int threshold)
1017 u64 time, c0;
1018 unsigned int mul = 100;
1020 if (old->cz_clock == 0)
1021 return false;
1023 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1024 mul <<= 8;
1026 time = now->cz_clock - old->cz_clock;
1027 time *= threshold * dev_priv->czclk_freq;
1029 /* Workload can be split between render + media, e.g. SwapBuffers
1030 * being blitted in X after being rendered in mesa. To account for
1031 * this we need to combine both engines into our activity counter.
1033 c0 = now->render_c0 - old->render_c0;
1034 c0 += now->media_c0 - old->media_c0;
1035 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1037 return c0 >= time;
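/*
 * Editorial reading of the comparison above (roughly, since both sides carry
 * the same clock scaling): "c0 >= time" asks whether the combined
 * render+media C0 residency accumulated over the elapsed evaluation interval
 * meets the given threshold percentage; mul starts at 100 (percent) and is
 * scaled by 256 when VLV_COUNT_RANGE_HIGH indicates the counters run in
 * their high range.
 */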
1040 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1042 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1043 dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1046 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1048 struct intel_rps_ei now;
1049 u32 events = 0;
1051 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1052 return 0;
1054 vlv_c0_read(dev_priv, &now);
1055 if (now.cz_clock == 0)
1056 return 0;
1058 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1059 if (!vlv_c0_above(dev_priv,
1060 &dev_priv->rps.down_ei, &now,
1061 dev_priv->rps.down_threshold))
1062 events |= GEN6_PM_RP_DOWN_THRESHOLD;
1063 dev_priv->rps.down_ei = now;
1066 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1067 if (vlv_c0_above(dev_priv,
1068 &dev_priv->rps.up_ei, &now,
1069 dev_priv->rps.up_threshold))
1070 events |= GEN6_PM_RP_UP_THRESHOLD;
1071 dev_priv->rps.up_ei = now;
1074 return events;
1077 static bool any_waiters(struct drm_i915_private *dev_priv)
1079 struct intel_engine_cs *engine;
1081 for_each_engine(engine, dev_priv)
1082 if (engine->irq_refcount)
1083 return true;
1085 return false;
1088 static void gen6_pm_rps_work(struct work_struct *work)
1090 struct drm_i915_private *dev_priv =
1091 container_of(work, struct drm_i915_private, rps.work);
1092 bool client_boost;
1093 int new_delay, adj, min, max;
1094 u32 pm_iir;
1096 spin_lock_irq(&dev_priv->irq_lock);
1097 /* Speed up work cancellation while disabling RPS interrupts. */
1098 if (!dev_priv->rps.interrupts_enabled) {
1099 spin_unlock_irq(&dev_priv->irq_lock);
1100 return;
1104 * The RPS work is synced during runtime suspend, we don't require a
1105 * wakeref. TODO: instead of disabling the asserts make sure that we
1106 * always hold an RPM reference while the work is running.
1108 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1110 pm_iir = dev_priv->rps.pm_iir;
1111 dev_priv->rps.pm_iir = 0;
1112 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1113 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1114 client_boost = dev_priv->rps.client_boost;
1115 dev_priv->rps.client_boost = false;
1116 spin_unlock_irq(&dev_priv->irq_lock);
1118 /* Make sure we didn't queue anything we're not going to process. */
1119 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1121 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1122 goto out;
1124 mutex_lock(&dev_priv->rps.hw_lock);
1126 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1128 adj = dev_priv->rps.last_adj;
1129 new_delay = dev_priv->rps.cur_freq;
1130 min = dev_priv->rps.min_freq_softlimit;
1131 max = dev_priv->rps.max_freq_softlimit;
1133 if (client_boost) {
1134 new_delay = dev_priv->rps.max_freq_softlimit;
1135 adj = 0;
1136 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1137 if (adj > 0)
1138 adj *= 2;
1139 else /* CHV needs even encode values */
1140 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1142 * For better performance, jump directly
1143 * to RPe if we're below it.
1145 if (new_delay < dev_priv->rps.efficient_freq - adj) {
1146 new_delay = dev_priv->rps.efficient_freq;
1147 adj = 0;
1149 } else if (any_waiters(dev_priv)) {
1150 adj = 0;
1151 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1152 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1153 new_delay = dev_priv->rps.efficient_freq;
1154 else
1155 new_delay = dev_priv->rps.min_freq_softlimit;
1156 adj = 0;
1157 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1158 if (adj < 0)
1159 adj *= 2;
1160 else /* CHV needs even encode values */
1161 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1162 } else { /* unknown event */
1163 adj = 0;
1166 dev_priv->rps.last_adj = adj;
1168 /* sysfs frequency interfaces may have snuck in while servicing the
1169 * interrupt
1171 new_delay += adj;
1172 new_delay = clamp_t(int, new_delay, min, max);
1174 intel_set_rps(dev_priv->dev, new_delay);
1176 mutex_unlock(&dev_priv->rps.hw_lock);
1177 out:
1178 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1183 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1184 * occurred.
1185 * @work: workqueue struct
1187 * Doesn't actually do anything except notify userspace. As a consequence of
1188 * this event, userspace should try to remap the bad rows since statistically
1189 * it is likely that the same row will go bad again.
1191 static void ivybridge_parity_work(struct work_struct *work)
1193 struct drm_i915_private *dev_priv =
1194 container_of(work, struct drm_i915_private, l3_parity.error_work);
1195 u32 error_status, row, bank, subbank;
1196 char *parity_event[6];
1197 uint32_t misccpctl;
1198 uint8_t slice = 0;
1200 /* We must turn off DOP level clock gating to access the L3 registers.
1201 * In order to prevent a get/put style interface, acquire struct mutex
1202 * any time we access those registers.
1204 mutex_lock(&dev_priv->dev->struct_mutex);
1206 /* If we've screwed up tracking, just let the interrupt fire again */
1207 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1208 goto out;
1210 misccpctl = I915_READ(GEN7_MISCCPCTL);
1211 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1212 POSTING_READ(GEN7_MISCCPCTL);
1214 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1215 i915_reg_t reg;
1217 slice--;
1218 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1219 break;
1221 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1223 reg = GEN7_L3CDERRST1(slice);
1225 error_status = I915_READ(reg);
1226 row = GEN7_PARITY_ERROR_ROW(error_status);
1227 bank = GEN7_PARITY_ERROR_BANK(error_status);
1228 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1230 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1231 POSTING_READ(reg);
1233 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1234 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1235 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1236 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1237 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1238 parity_event[5] = NULL;
1240 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1241 KOBJ_CHANGE, parity_event);
1243 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1244 slice, row, bank, subbank);
1246 kfree(parity_event[4]);
1247 kfree(parity_event[3]);
1248 kfree(parity_event[2]);
1249 kfree(parity_event[1]);
1252 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1254 out:
1255 WARN_ON(dev_priv->l3_parity.which_slice);
1256 spin_lock_irq(&dev_priv->irq_lock);
1257 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1258 spin_unlock_irq(&dev_priv->irq_lock);
1260 mutex_unlock(&dev_priv->dev->struct_mutex);
1263 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1264 u32 iir)
1266 if (!HAS_L3_DPF(dev_priv))
1267 return;
1269 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1270 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1271 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1273 iir &= GT_PARITY_ERROR(dev_priv);
1274 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1275 dev_priv->l3_parity.which_slice |= 1 << 1;
1277 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1278 dev_priv->l3_parity.which_slice |= 1 << 0;
1280 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1283 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1284 u32 gt_iir)
1286 if (gt_iir &
1287 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1288 notify_ring(&dev_priv->engine[RCS]);
1289 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1290 notify_ring(&dev_priv->engine[VCS]);
1293 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1294 u32 gt_iir)
1297 if (gt_iir &
1298 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1299 notify_ring(&dev_priv->engine[RCS]);
1300 if (gt_iir & GT_BSD_USER_INTERRUPT)
1301 notify_ring(&dev_priv->engine[VCS]);
1302 if (gt_iir & GT_BLT_USER_INTERRUPT)
1303 notify_ring(&dev_priv->engine[BCS]);
1305 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1306 GT_BSD_CS_ERROR_INTERRUPT |
1307 GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1308 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1310 if (gt_iir & GT_PARITY_ERROR(dev_priv))
1311 ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1314 static __always_inline void
1315 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1317 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1318 notify_ring(engine);
1319 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1320 tasklet_schedule(&engine->irq_tasklet);
1323 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
1324 u32 master_ctl,
1325 u32 gt_iir[4])
1328 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1329 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
1330 if (gt_iir[0]) {
1331 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
1332 } else
1333 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1336 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1337 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
1338 if (gt_iir[1]) {
1339 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
1340 } else
1341 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1344 if (master_ctl & GEN8_GT_VECS_IRQ) {
1345 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
1346 if (gt_iir[3]) {
1347 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
1348 } else
1349 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1352 if (master_ctl & GEN8_GT_PM_IRQ) {
1353 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
1354 if (gt_iir[2] & dev_priv->pm_rps_events) {
1355 I915_WRITE_FW(GEN8_GT_IIR(2),
1356 gt_iir[2] & dev_priv->pm_rps_events);
1357 } else
1358 DRM_ERROR("The master control interrupt lied (PM)!\n");
1363 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1364 u32 gt_iir[4])
1366 if (gt_iir[0]) {
1367 gen8_cs_irq_handler(&dev_priv->engine[RCS],
1368 gt_iir[0], GEN8_RCS_IRQ_SHIFT);
1369 gen8_cs_irq_handler(&dev_priv->engine[BCS],
1370 gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1373 if (gt_iir[1]) {
1374 gen8_cs_irq_handler(&dev_priv->engine[VCS],
1375 gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1376 gen8_cs_irq_handler(&dev_priv->engine[VCS2],
1377 gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1380 if (gt_iir[3])
1381 gen8_cs_irq_handler(&dev_priv->engine[VECS],
1382 gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1384 if (gt_iir[2] & dev_priv->pm_rps_events)
1385 gen6_rps_irq_handler(dev_priv, gt_iir[2]);
1388 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1390 switch (port) {
1391 case PORT_A:
1392 return val & PORTA_HOTPLUG_LONG_DETECT;
1393 case PORT_B:
1394 return val & PORTB_HOTPLUG_LONG_DETECT;
1395 case PORT_C:
1396 return val & PORTC_HOTPLUG_LONG_DETECT;
1397 default:
1398 return false;
1402 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1404 switch (port) {
1405 case PORT_E:
1406 return val & PORTE_HOTPLUG_LONG_DETECT;
1407 default:
1408 return false;
1412 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1414 switch (port) {
1415 case PORT_A:
1416 return val & PORTA_HOTPLUG_LONG_DETECT;
1417 case PORT_B:
1418 return val & PORTB_HOTPLUG_LONG_DETECT;
1419 case PORT_C:
1420 return val & PORTC_HOTPLUG_LONG_DETECT;
1421 case PORT_D:
1422 return val & PORTD_HOTPLUG_LONG_DETECT;
1423 default:
1424 return false;
1428 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1430 switch (port) {
1431 case PORT_A:
1432 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1433 default:
1434 return false;
1438 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1440 switch (port) {
1441 case PORT_B:
1442 return val & PORTB_HOTPLUG_LONG_DETECT;
1443 case PORT_C:
1444 return val & PORTC_HOTPLUG_LONG_DETECT;
1445 case PORT_D:
1446 return val & PORTD_HOTPLUG_LONG_DETECT;
1447 default:
1448 return false;
1452 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1454 switch (port) {
1455 case PORT_B:
1456 return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1457 case PORT_C:
1458 return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1459 case PORT_D:
1460 return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1461 default:
1462 return false;
1467 * Get a bit mask of pins that have triggered, and which ones may be long.
1468 * This can be called multiple times with the same masks to accumulate
1469 * hotplug detection results from several registers.
1471 * Note that the caller is expected to zero out the masks initially.
1473 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1474 u32 hotplug_trigger, u32 dig_hotplug_reg,
1475 const u32 hpd[HPD_NUM_PINS],
1476 bool long_pulse_detect(enum port port, u32 val))
1478 enum port port;
1479 int i;
1481 for_each_hpd_pin(i) {
1482 if ((hpd[i] & hotplug_trigger) == 0)
1483 continue;
1485 *pin_mask |= BIT(i);
1487 if (!intel_hpd_pin_to_port(i, &port))
1488 continue;
1490 if (long_pulse_detect(port, dig_hotplug_reg))
1491 *long_mask |= BIT(i);
1494 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1495 hotplug_trigger, dig_hotplug_reg, *pin_mask);
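/*
 * Usage sketch (editorial, mirroring the callers further down in this file):
 * the caller zeroes both masks and then invokes this once per hotplug
 * register it has read, e.g.
 *
 *   u32 pin_mask = 0, long_mask = 0;
 *   intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
 *                      dig_hotplug_reg, hpd_cpt,
 *                      pch_port_hotplug_long_detect);
 *   intel_hpd_irq_handler(dev, pin_mask, long_mask);
 */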
1499 static void gmbus_irq_handler(struct drm_device *dev)
1501 struct drm_i915_private *dev_priv = dev->dev_private;
1503 wake_up_all(&dev_priv->gmbus_wait_queue);
1506 static void dp_aux_irq_handler(struct drm_device *dev)
1508 struct drm_i915_private *dev_priv = dev->dev_private;
1510 wake_up_all(&dev_priv->gmbus_wait_queue);
1513 #if defined(CONFIG_DEBUG_FS)
1514 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1515 uint32_t crc0, uint32_t crc1,
1516 uint32_t crc2, uint32_t crc3,
1517 uint32_t crc4)
1519 struct drm_i915_private *dev_priv = dev->dev_private;
1520 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1521 struct intel_pipe_crc_entry *entry;
1522 int head, tail;
1524 spin_lock(&pipe_crc->lock);
1526 if (!pipe_crc->entries) {
1527 spin_unlock(&pipe_crc->lock);
1528 DRM_DEBUG_KMS("spurious interrupt\n");
1529 return;
1532 head = pipe_crc->head;
1533 tail = pipe_crc->tail;
1535 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1536 spin_unlock(&pipe_crc->lock);
1537 DRM_ERROR("CRC buffer overflowing\n");
1538 return;
1541 entry = &pipe_crc->entries[head];
1543 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1544 entry->crc[0] = crc0;
1545 entry->crc[1] = crc1;
1546 entry->crc[2] = crc2;
1547 entry->crc[3] = crc3;
1548 entry->crc[4] = crc4;
1550 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1551 pipe_crc->head = head;
1553 spin_unlock(&pipe_crc->lock);
1555 wake_up_interruptible(&pipe_crc->wq);
1557 #else
1558 static inline void
1559 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1560 uint32_t crc0, uint32_t crc1,
1561 uint32_t crc2, uint32_t crc3,
1562 uint32_t crc4) {}
1563 #endif
1566 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1568 struct drm_i915_private *dev_priv = dev->dev_private;
1570 display_pipe_crc_irq_handler(dev, pipe,
1571 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1572 0, 0, 0, 0);
1575 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1577 struct drm_i915_private *dev_priv = dev->dev_private;
1579 display_pipe_crc_irq_handler(dev, pipe,
1580 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1581 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1582 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1583 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1584 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1587 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1589 struct drm_i915_private *dev_priv = dev->dev_private;
1590 uint32_t res1, res2;
1592 if (INTEL_INFO(dev)->gen >= 3)
1593 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1594 else
1595 res1 = 0;
1597 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1598 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1599 else
1600 res2 = 0;
1602 display_pipe_crc_irq_handler(dev, pipe,
1603 I915_READ(PIPE_CRC_RES_RED(pipe)),
1604 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1605 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1606 res1, res2);
1609 /* The RPS events need forcewake, so we add them to a work queue and mask their
1610 * IMR bits until the work is done. Other interrupts can be processed without
1611 * the work queue. */
1612 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1614 if (pm_iir & dev_priv->pm_rps_events) {
1615 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1616 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1617 if (dev_priv->rps.interrupts_enabled) {
1618 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1619 queue_work(dev_priv->wq, &dev_priv->rps.work);
1621 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1624 if (INTEL_INFO(dev_priv)->gen >= 8)
1625 return;
1627 if (HAS_VEBOX(dev_priv)) {
1628 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1629 notify_ring(&dev_priv->engine[VECS]);
1631 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1632 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1636 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
1638 if (!drm_handle_vblank(dev, pipe))
1639 return false;
1641 return true;
1644 static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
1645 u32 pipe_stats[I915_MAX_PIPES])
1647 struct drm_i915_private *dev_priv = dev->dev_private;
1648 int pipe;
1650 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1652 if (!dev_priv->display_irqs_enabled) {
1653 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1654 return;
1657 for_each_pipe(dev_priv, pipe) {
1658 i915_reg_t reg;
1659 u32 mask, iir_bit = 0;
1662 * PIPESTAT bits get signalled even when the interrupt is
1663 * disabled with the mask bits, and some of the status bits do
1664 * not generate interrupts at all (like the underrun bit). Hence
1665 * we need to be careful that we only handle what we want to
1666 * handle.
1669 /* FIFO underruns are filtered in the underrun handler. */
1670 mask = PIPE_FIFO_UNDERRUN_STATUS;
1672 switch (pipe) {
1673 case PIPE_A:
1674 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1675 break;
1676 case PIPE_B:
1677 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1678 break;
1679 case PIPE_C:
1680 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1681 break;
1683 if (iir & iir_bit)
1684 mask |= dev_priv->pipestat_irq_mask[pipe];
1686 if (!mask)
1687 continue;
1689 reg = PIPESTAT(pipe);
1690 mask |= PIPESTAT_INT_ENABLE_MASK;
1691 pipe_stats[pipe] = I915_READ(reg) & mask;
1694 * Clear the PIPE*STAT regs before the IIR
1696 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1697 PIPESTAT_INT_STATUS_MASK))
1698 I915_WRITE(reg, pipe_stats[pipe]);
1700 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1703 static void valleyview_pipestat_irq_handler(struct drm_device *dev,
1704 u32 pipe_stats[I915_MAX_PIPES])
1706 struct drm_i915_private *dev_priv = to_i915(dev);
1707 enum i915_pipe pipe;
1709 for_each_pipe(dev_priv, pipe) {
1710 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1711 intel_pipe_handle_vblank(dev, pipe))
1712 intel_check_page_flip(dev, pipe);
1714 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1715 intel_prepare_page_flip(dev, pipe);
1716 intel_finish_page_flip(dev, pipe);
1719 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1720 i9xx_pipe_crc_irq_handler(dev, pipe);
1722 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1723 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1726 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1727 gmbus_irq_handler(dev);
1730 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1732 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1734 if (hotplug_status)
1735 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1737 return hotplug_status;
1740 static void i9xx_hpd_irq_handler(struct drm_device *dev,
1741 u32 hotplug_status)
1743 u32 pin_mask = 0, long_mask = 0;
1745 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1746 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1748 if (hotplug_trigger) {
1749 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1750 hotplug_trigger, hpd_status_g4x,
1751 i9xx_port_hotplug_long_detect);
1753 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1756 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1757 dp_aux_irq_handler(dev);
1758 } else {
1759 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1761 if (hotplug_trigger) {
1762 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1763 hotplug_trigger, hpd_status_i915,
1764 i9xx_port_hotplug_long_detect);
1765 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1770 static irqreturn_t valleyview_irq_handler(void *arg)
1772 struct drm_device *dev = arg;
1773 struct drm_i915_private *dev_priv = dev->dev_private;
1775 if (!intel_irqs_enabled(dev_priv))
1776 return IRQ_NONE;
1778 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1779 disable_rpm_wakeref_asserts(dev_priv);
1781 do {
1782 u32 iir, gt_iir, pm_iir;
1783 u32 pipe_stats[I915_MAX_PIPES] = {};
1784 u32 hotplug_status = 0;
1785 u32 ier = 0;
1787 gt_iir = I915_READ(GTIIR);
1788 pm_iir = I915_READ(GEN6_PMIIR);
1789 iir = I915_READ(VLV_IIR);
1791 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1792 break;
1796 * Theory on interrupt generation, based on empirical evidence:
1798 * x = ((VLV_IIR & VLV_IER) ||
1799 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1800 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1802 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1803 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1804 * guarantee the CPU interrupt will be raised again even if we
1805 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1806 * bits this time around.
1808 I915_WRITE(VLV_MASTER_IER, 0);
1809 ier = I915_READ(VLV_IER);
1810 I915_WRITE(VLV_IER, 0);
1812 if (gt_iir)
1813 I915_WRITE(GTIIR, gt_iir);
1814 if (pm_iir)
1815 I915_WRITE(GEN6_PMIIR, pm_iir);
1817 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1818 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1820 /* Call regardless, as some status bits might not be
1821 * signalled in iir */
1822 valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
1825 * VLV_IIR is single buffered, and reflects the level
1826 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1828 if (iir)
1829 I915_WRITE(VLV_IIR, iir);
1831 I915_WRITE(VLV_IER, ier);
1832 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1833 POSTING_READ(VLV_MASTER_IER);
1835 if (gt_iir)
1836 snb_gt_irq_handler(dev_priv, gt_iir);
1837 if (pm_iir)
1838 gen6_rps_irq_handler(dev_priv, pm_iir);
1840 if (hotplug_status)
1841 i9xx_hpd_irq_handler(dev, hotplug_status);
1843 valleyview_pipestat_irq_handler(dev, pipe_stats);
1844 } while (0);
1846 enable_rpm_wakeref_asserts(dev_priv);
1850 static irqreturn_t cherryview_irq_handler(void *arg)
1852 struct drm_device *dev = arg;
1853 struct drm_i915_private *dev_priv = dev->dev_private;
1855 if (!intel_irqs_enabled(dev_priv))
1856 return IRQ_NONE;
1858 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1859 disable_rpm_wakeref_asserts(dev_priv);
1861 do {
1862 u32 master_ctl, iir;
1863 u32 gt_iir[4] = {};
1864 u32 pipe_stats[I915_MAX_PIPES] = {};
1865 u32 hotplug_status = 0;
1866 u32 ier = 0;
1868 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1869 iir = I915_READ(VLV_IIR);
1871 if (master_ctl == 0 && iir == 0)
1872 break;
1876 * Theory on interrupt generation, based on empirical evidence:
1878 * x = ((VLV_IIR & VLV_IER) ||
1879 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1880 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1882 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1883 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1884 * guarantee the CPU interrupt will be raised again even if we
1885 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1886 * bits this time around.
1888 I915_WRITE(GEN8_MASTER_IRQ, 0);
1889 ier = I915_READ(VLV_IER);
1890 I915_WRITE(VLV_IER, 0);
1892 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1894 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1895 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1897 /* Call regardless, as some status bits might not be
1898 * signalled in iir */
1899 valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
1902 * VLV_IIR is single buffered, and reflects the level
1903 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1905 if (iir)
1906 I915_WRITE(VLV_IIR, iir);
1908 I915_WRITE(VLV_IER, ier);
1909 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1910 POSTING_READ(GEN8_MASTER_IRQ);
1912 gen8_gt_irq_handler(dev_priv, gt_iir);
1914 if (hotplug_status)
1915 i9xx_hpd_irq_handler(dev, hotplug_status);
1917 valleyview_pipestat_irq_handler(dev, pipe_stats);
1918 } while (0);
1920 enable_rpm_wakeref_asserts(dev_priv);
1924 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1925 const u32 hpd[HPD_NUM_PINS])
1927 struct drm_i915_private *dev_priv = to_i915(dev);
1928 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1931 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1932 * unless we touch the hotplug register, even if hotplug_trigger is
1933 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1934 * errors.
1936 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1937 if (!hotplug_trigger) {
1938 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1939 PORTD_HOTPLUG_STATUS_MASK |
1940 PORTC_HOTPLUG_STATUS_MASK |
1941 PORTB_HOTPLUG_STATUS_MASK;
1942 dig_hotplug_reg &= ~mask;
1945 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1946 if (!hotplug_trigger)
1947 return;
1949 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1950 dig_hotplug_reg, hpd,
1951 pch_port_hotplug_long_detect);
1953 intel_hpd_irq_handler(dev, pin_mask, long_mask);
1956 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1958 struct drm_i915_private *dev_priv = dev->dev_private;
1959 int pipe;
1960 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1962 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1964 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1965 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1966 SDE_AUDIO_POWER_SHIFT);
1967 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1968 port_name(port));
1971 if (pch_iir & SDE_AUX_MASK)
1972 dp_aux_irq_handler(dev);
1974 if (pch_iir & SDE_GMBUS)
1975 gmbus_irq_handler(dev);
1977 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1978 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1980 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1981 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1983 if (pch_iir & SDE_POISON)
1984 DRM_ERROR("PCH poison interrupt\n");
1986 if (pch_iir & SDE_FDI_MASK)
1987 for_each_pipe(dev_priv, pipe)
1988 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1989 pipe_name(pipe),
1990 I915_READ(FDI_RX_IIR(pipe)));
1992 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1993 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1995 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1996 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1998 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1999 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2001 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2002 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2005 static void ivb_err_int_handler(struct drm_device *dev)
2007 struct drm_i915_private *dev_priv = dev->dev_private;
2008 u32 err_int = I915_READ(GEN7_ERR_INT);
2009 enum i915_pipe pipe;
2011 if (err_int & ERR_INT_POISON)
2012 DRM_ERROR("Poison interrupt\n");
2014 for_each_pipe(dev_priv, pipe) {
2015 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2016 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2018 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2019 if (IS_IVYBRIDGE(dev))
2020 ivb_pipe_crc_irq_handler(dev, pipe);
2021 else
2022 hsw_pipe_crc_irq_handler(dev, pipe);
2026 I915_WRITE(GEN7_ERR_INT, err_int);
2029 static void cpt_serr_int_handler(struct drm_device *dev)
2031 struct drm_i915_private *dev_priv = dev->dev_private;
2032 u32 serr_int = I915_READ(SERR_INT);
2034 if (serr_int & SERR_INT_POISON)
2035 DRM_ERROR("PCH poison interrupt\n");
2037 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2038 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2040 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2041 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2043 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2044 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2046 I915_WRITE(SERR_INT, serr_int);
2049 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2051 struct drm_i915_private *dev_priv = dev->dev_private;
2052 int pipe;
2053 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2055 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
2057 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2058 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2059 SDE_AUDIO_POWER_SHIFT_CPT);
2060 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2061 port_name(port));
2064 if (pch_iir & SDE_AUX_MASK_CPT)
2065 dp_aux_irq_handler(dev);
2067 if (pch_iir & SDE_GMBUS_CPT)
2068 gmbus_irq_handler(dev);
2070 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2071 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2073 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2074 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2076 if (pch_iir & SDE_FDI_MASK_CPT)
2077 for_each_pipe(dev_priv, pipe)
2078 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2079 pipe_name(pipe),
2080 I915_READ(FDI_RX_IIR(pipe)));
2082 if (pch_iir & SDE_ERROR_CPT)
2083 cpt_serr_int_handler(dev);
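/*
 * SPT/KBP PCHs split hotplug status across two registers: ports A-D live
 * in PCH_PORT_HOTPLUG, while port E has its own bit in PCH_PORT_HOTPLUG2.
 * Both are acked and decoded before a single call to intel_hpd_irq_handler().
 */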
2086 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2088 struct drm_i915_private *dev_priv = dev->dev_private;
2089 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2090 ~SDE_PORTE_HOTPLUG_SPT;
2091 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2092 u32 pin_mask = 0, long_mask = 0;
2094 if (hotplug_trigger) {
2095 u32 dig_hotplug_reg;
2097 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2098 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2100 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2101 dig_hotplug_reg, hpd_spt,
2102 spt_port_hotplug_long_detect);
2105 if (hotplug2_trigger) {
2106 u32 dig_hotplug_reg;
2108 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2109 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2111 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2112 dig_hotplug_reg, hpd_spt,
2113 spt_port_hotplug2_long_detect);
2116 if (pin_mask)
2117 intel_hpd_irq_handler(dev, pin_mask, long_mask);
2119 if (pch_iir & SDE_GMBUS_CPT)
2120 gmbus_irq_handler(dev);
2123 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2124 const u32 hpd[HPD_NUM_PINS])
2126 struct drm_i915_private *dev_priv = to_i915(dev);
2127 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2129 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2130 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2132 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2133 dig_hotplug_reg, hpd,
2134 ilk_port_hotplug_long_detect);
2136 intel_hpd_irq_handler(dev, pin_mask, long_mask);
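/*
 * Display interrupt handling for ILK/SNB: decode the DEIIR bits for DP A
 * hotplug, AUX, opregion (GSE), poison and the per-pipe vblank/underrun/
 * CRC/flip-done events, then forward any PCH event to the CPT or IBX handler.
 */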
2139 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2141 struct drm_i915_private *dev_priv = dev->dev_private;
2142 enum i915_pipe pipe;
2143 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2145 if (hotplug_trigger)
2146 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2148 if (de_iir & DE_AUX_CHANNEL_A)
2149 dp_aux_irq_handler(dev);
2151 if (de_iir & DE_GSE)
2152 intel_opregion_asle_intr(dev);
2154 if (de_iir & DE_POISON)
2155 DRM_ERROR("Poison interrupt\n");
2157 for_each_pipe(dev_priv, pipe) {
2158 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2159 intel_pipe_handle_vblank(dev, pipe))
2160 intel_check_page_flip(dev, pipe);
2162 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2163 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2165 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2166 i9xx_pipe_crc_irq_handler(dev, pipe);
2168 /* plane/pipes map 1:1 on ilk+ */
2169 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2170 intel_prepare_page_flip(dev, pipe);
2171 intel_finish_page_flip_plane(dev, pipe);
2175 /* check event from PCH */
2176 if (de_iir & DE_PCH_EVENT) {
2177 u32 pch_iir = I915_READ(SDEIIR);
2179 if (HAS_PCH_CPT(dev))
2180 cpt_irq_handler(dev, pch_iir);
2181 else
2182 ibx_irq_handler(dev, pch_iir);
2184 /* should clear PCH hotplug event before clearing CPU irq */
2185 I915_WRITE(SDEIIR, pch_iir);
2188 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2189 ironlake_rps_change_irq_handler(dev);
2192 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2194 struct drm_i915_private *dev_priv = dev->dev_private;
2195 enum i915_pipe pipe;
2196 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2198 if (hotplug_trigger)
2199 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2201 if (de_iir & DE_ERR_INT_IVB)
2202 ivb_err_int_handler(dev);
2204 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2205 dp_aux_irq_handler(dev);
2207 if (de_iir & DE_GSE_IVB)
2208 intel_opregion_asle_intr(dev);
2210 for_each_pipe(dev_priv, pipe) {
2211 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2212 intel_pipe_handle_vblank(dev, pipe))
2213 intel_check_page_flip(dev, pipe);
2215 /* plane/pipes map 1:1 on ilk+ */
2216 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2217 intel_prepare_page_flip(dev, pipe);
2218 intel_finish_page_flip_plane(dev, pipe);
2222 /* check event from PCH */
2223 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2224 u32 pch_iir = I915_READ(SDEIIR);
2226 cpt_irq_handler(dev, pch_iir);
2228 /* clear PCH hotplug event before clearing CPU irq */
2229 I915_WRITE(SDEIIR, pch_iir);
2234 * To handle irqs with the minimum potential races with fresh interrupts, we:
2235 * 1 - Disable Master Interrupt Control.
2236 * 2 - Find the source(s) of the interrupt.
2237 * 3 - Clear the Interrupt Identity bits (IIR).
2238 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2239 * 5 - Re-enable Master Interrupt Control.
2241 static irqreturn_t ironlake_irq_handler(void *arg)
2243 struct drm_device *dev = arg;
2244 struct drm_i915_private *dev_priv = dev->dev_private;
2245 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2247 if (!intel_irqs_enabled(dev_priv))
2248 return IRQ_NONE;
2250 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2251 disable_rpm_wakeref_asserts(dev_priv);
2253 /* disable master interrupt before clearing iir */
2254 de_ier = I915_READ(DEIER);
2255 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2256 POSTING_READ(DEIER);
2258 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2259 * interrupts will be stored on its back queue, and then we'll be
2260 * able to process them after we restore SDEIER (as soon as we restore
2261 * it, we'll get an interrupt if SDEIIR still has something to process
2262 * due to its back queue). */
2263 if (!HAS_PCH_NOP(dev)) {
2264 sde_ier = I915_READ(SDEIER);
2265 I915_WRITE(SDEIER, 0);
2266 POSTING_READ(SDEIER);
2269 /* Find, clear, then process each source of interrupt */
2271 gt_iir = I915_READ(GTIIR);
2272 if (gt_iir) {
2273 I915_WRITE(GTIIR, gt_iir);
2274 if (INTEL_INFO(dev)->gen >= 6)
2275 snb_gt_irq_handler(dev_priv, gt_iir);
2276 else
2277 ilk_gt_irq_handler(dev_priv, gt_iir);
2280 de_iir = I915_READ(DEIIR);
2281 if (de_iir) {
2282 I915_WRITE(DEIIR, de_iir);
2283 if (INTEL_INFO(dev)->gen >= 7)
2284 ivb_display_irq_handler(dev, de_iir);
2285 else
2286 ilk_display_irq_handler(dev, de_iir);
2289 if (INTEL_INFO(dev)->gen >= 6) {
2290 u32 pm_iir = I915_READ(GEN6_PMIIR);
2291 if (pm_iir) {
2292 I915_WRITE(GEN6_PMIIR, pm_iir);
2293 gen6_rps_irq_handler(dev_priv, pm_iir);
2297 I915_WRITE(DEIER, de_ier);
2298 POSTING_READ(DEIER);
2299 if (!HAS_PCH_NOP(dev)) {
2300 I915_WRITE(SDEIER, sde_ier);
2301 POSTING_READ(SDEIER);
2304 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2305 enable_rpm_wakeref_asserts(dev_priv);
2309 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2310 const u32 hpd[HPD_NUM_PINS])
2312 struct drm_i915_private *dev_priv = to_i915(dev);
2313 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2315 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2316 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2318 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2319 dig_hotplug_reg, hpd,
2320 bxt_port_hotplug_long_detect);
2322 intel_hpd_irq_handler(dev, pin_mask, long_mask);
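/*
 * gen8+ display engine (DE) interrupt handling: the master control register
 * tells us which of the MISC, PORT, per-pipe and PCH IIRs need servicing;
 * each is read, acked and dispatched in turn.
 */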
2325 static irqreturn_t
2326 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2328 struct drm_device *dev = dev_priv->dev;
2329 u32 iir;
2330 enum i915_pipe pipe;
2332 if (master_ctl & GEN8_DE_MISC_IRQ) {
2333 iir = I915_READ(GEN8_DE_MISC_IIR);
2334 if (iir) {
2335 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2336 if (iir & GEN8_DE_MISC_GSE)
2337 intel_opregion_asle_intr(dev);
2338 else
2339 DRM_ERROR("Unexpected DE Misc interrupt\n");
2341 else
2342 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2345 if (master_ctl & GEN8_DE_PORT_IRQ) {
2346 iir = I915_READ(GEN8_DE_PORT_IIR);
2347 if (iir) {
2348 u32 tmp_mask;
2349 bool found = false;
2351 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2353 tmp_mask = GEN8_AUX_CHANNEL_A;
2354 if (INTEL_INFO(dev_priv)->gen >= 9)
2355 tmp_mask |= GEN9_AUX_CHANNEL_B |
2356 GEN9_AUX_CHANNEL_C |
2357 GEN9_AUX_CHANNEL_D;
2359 if (iir & tmp_mask) {
2360 dp_aux_irq_handler(dev);
2361 found = true;
2364 if (IS_BROXTON(dev_priv)) {
2365 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2366 if (tmp_mask) {
2367 bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
2368 found = true;
2370 } else if (IS_BROADWELL(dev_priv)) {
2371 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2372 if (tmp_mask) {
2373 ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
2374 found = true;
2378 if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
2379 gmbus_irq_handler(dev);
2380 found = true;
2383 if (!found)
2384 DRM_ERROR("Unexpected DE Port interrupt\n");
2386 else
2387 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2390 for_each_pipe(dev_priv, pipe) {
2391 u32 flip_done, fault_errors;
2393 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2394 continue;
2396 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2397 if (!iir) {
2398 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2399 continue;
2402 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2404 if (iir & GEN8_PIPE_VBLANK &&
2405 intel_pipe_handle_vblank(dev, pipe))
2406 intel_check_page_flip(dev, pipe);
2408 flip_done = iir;
2409 if (INTEL_INFO(dev_priv)->gen >= 9)
2410 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2411 else
2412 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2414 if (flip_done) {
2415 intel_prepare_page_flip(dev, pipe);
2416 intel_finish_page_flip_plane(dev, pipe);
2419 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2420 hsw_pipe_crc_irq_handler(dev, pipe);
2422 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2423 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2425 fault_errors = iir;
2426 if (INTEL_INFO(dev_priv)->gen >= 9)
2427 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2428 else
2429 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2431 if (fault_errors)
2432 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2433 pipe_name(pipe),
2434 fault_errors);
2437 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2438 master_ctl & GEN8_DE_PCH_IRQ) {
2440 * FIXME(BDW): Assume for now that the new interrupt handling
2441 * scheme also closed the SDE interrupt handling race we've seen
2442 * on older pch-split platforms. But this needs testing.
2444 iir = I915_READ(SDEIIR);
2445 if (iir) {
2446 I915_WRITE(SDEIIR, iir);
2448 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2449 spt_irq_handler(dev, iir);
2450 else
2451 cpt_irq_handler(dev, iir);
2452 } else {
2454 * Like on previous PCH there seems to be something
2455 * fishy going on with forwarding PCH interrupts.
2457 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2463 static irqreturn_t gen8_irq_handler(void *arg)
2465 struct drm_device *dev = arg;
2466 struct drm_i915_private *dev_priv = dev->dev_private;
2467 u32 master_ctl;
2468 u32 gt_iir[4] = {};
2470 if (!intel_irqs_enabled(dev_priv))
2471 return IRQ_NONE;
2473 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2474 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2475 if (!master_ctl)
2476 return IRQ_NONE;
2478 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2480 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2481 disable_rpm_wakeref_asserts(dev_priv);
2483 /* Find, clear, then process each source of interrupt */
2484 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2485 gen8_gt_irq_handler(dev_priv, gt_iir);
2486 gen8_de_irq_handler(dev_priv, master_ctl);
2488 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2489 POSTING_READ_FW(GEN8_MASTER_IRQ);
2491 enable_rpm_wakeref_asserts(dev_priv);
2495 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2496 bool reset_completed)
2498 struct intel_engine_cs *engine;
2501 * Notify all waiters for GPU completion events that reset state has
2502 * been changed, and that they need to restart their wait after
2503 * checking for potential errors (and bail out to drop locks if there is
2504 * a gpu reset pending so that i915_error_work_func can acquire them).
2507 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2508 for_each_engine(engine, dev_priv)
2509 wake_up_all(&engine->irq_queue);
2511 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2512 wake_up_all(&dev_priv->pending_flip_queue);
2515 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2516 * reset state is cleared.
2518 if (reset_completed)
2519 wake_up_all(&dev_priv->gpu_error.reset_queue);
2523 * i915_reset_and_wakeup - do process context error handling work
2524 * @dev: drm device
2526 * Fire an error uevent so userspace can see that a hang or error
2527 * was detected.
2529 static void i915_reset_and_wakeup(struct drm_device *dev)
2531 struct drm_i915_private *dev_priv = to_i915(dev);
2532 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2533 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2534 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2535 int ret;
2537 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2540 * Note that there's only one work item which does gpu resets, so we
2541 * need not worry about concurrent gpu resets potentially incrementing
2542 * error->reset_counter twice. We only need to take care of another
2543 * racing irq/hangcheck declaring the gpu dead for a second time. A
2544 * quick check for that is good enough: schedule_work ensures the
2545 * correct ordering between hang detection and this work item, and since
2546 * the reset in-progress bit is only ever set by code outside of this
2547 * work we don't need to worry about any other races.
2549 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2550 DRM_DEBUG_DRIVER("resetting chip\n");
2551 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2552 reset_event);
2555 * In most cases it's guaranteed that we get here with an RPM
2556 * reference held, for example because there is a pending GPU
2557 * request that won't finish until the reset is done. This
2558 * isn't the case at least when we get here by doing a
2559 * simulated reset via debugfs, so get an RPM reference.
2561 intel_runtime_pm_get(dev_priv);
2563 intel_prepare_reset(dev);
2566 * All state reset _must_ be completed before we update the
2567 * reset counter, for otherwise waiters might miss the reset
2568 * pending state and not properly drop locks, resulting in
2569 * deadlocks with the reset work.
2571 ret = i915_reset(dev);
2573 intel_finish_reset(dev);
2575 intel_runtime_pm_put(dev_priv);
2577 if (ret == 0)
2578 kobject_uevent_env(&dev->primary->kdev->kobj,
2579 KOBJ_CHANGE, reset_done_event);
2582 * Note: The wake_up also serves as a memory barrier so that
2583 * waiters see the updated value of the reset counter atomic_t.
2585 i915_error_wake_up(dev_priv, true);
2589 static void i915_report_and_clear_eir(struct drm_device *dev)
2591 struct drm_i915_private *dev_priv = dev->dev_private;
2592 uint32_t instdone[I915_NUM_INSTDONE_REG];
2593 u32 eir = I915_READ(EIR);
2594 int pipe, i;
2596 if (!eir)
2597 return;
2599 pr_err("render error detected, EIR: 0x%08x\n", eir);
2601 i915_get_extra_instdone(dev, instdone);
2603 if (IS_G4X(dev)) {
2604 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2605 u32 ipeir = I915_READ(IPEIR_I965);
2607 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2608 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2609 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2610 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2611 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2612 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2613 I915_WRITE(IPEIR_I965, ipeir);
2614 POSTING_READ(IPEIR_I965);
2616 if (eir & GM45_ERROR_PAGE_TABLE) {
2617 u32 pgtbl_err = I915_READ(PGTBL_ER);
2618 pr_err("page table error\n");
2619 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2620 I915_WRITE(PGTBL_ER, pgtbl_err);
2621 POSTING_READ(PGTBL_ER);
2625 if (!IS_GEN2(dev)) {
2626 if (eir & I915_ERROR_PAGE_TABLE) {
2627 u32 pgtbl_err = I915_READ(PGTBL_ER);
2628 pr_err("page table error\n");
2629 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2630 I915_WRITE(PGTBL_ER, pgtbl_err);
2631 POSTING_READ(PGTBL_ER);
2635 if (eir & I915_ERROR_MEMORY_REFRESH) {
2636 pr_err("memory refresh error:\n");
2637 for_each_pipe(dev_priv, pipe)
2638 pr_err("pipe %c stat: 0x%08x\n",
2639 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2640 /* pipestat has already been acked */
2642 if (eir & I915_ERROR_INSTRUCTION) {
2643 pr_err("instruction error\n");
2644 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2645 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2646 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2647 if (INTEL_INFO(dev)->gen < 4) {
2648 u32 ipeir = I915_READ(IPEIR);
2650 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2651 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2652 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2653 I915_WRITE(IPEIR, ipeir);
2654 POSTING_READ(IPEIR);
2655 } else {
2656 u32 ipeir = I915_READ(IPEIR_I965);
2658 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2659 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2660 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2661 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2662 I915_WRITE(IPEIR_I965, ipeir);
2663 POSTING_READ(IPEIR_I965);
2667 I915_WRITE(EIR, eir);
2668 POSTING_READ(EIR);
2669 eir = I915_READ(EIR);
2670 if (eir) {
2672 * some errors might have become stuck,
2673 * mask them.
2675 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2676 I915_WRITE(EMR, I915_READ(EMR) | eir);
2677 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2682 * i915_handle_error - handle a gpu error
2683 * @dev: drm device
2684 * @engine_mask: mask representing engines that are hung
2685 * Do some basic checking of register state at error time and
2686 * dump it to the syslog. Also call i915_capture_error_state() to make
2687 * sure we get a record and make it available in debugfs. Fire a uevent
2688 * so userspace knows something bad happened (should trigger collection
2689 * of a ring dump etc.).
2691 void i915_handle_error(struct drm_device *dev, u32 engine_mask,
2692 const char *fmt, ...)
2694 struct drm_i915_private *dev_priv = dev->dev_private;
2695 #if 0
2696 va_list args;
2697 char error_msg[80];
2699 va_start(args, fmt);
2700 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2701 va_end(args);
2703 i915_capture_error_state(dev, engine_mask, error_msg);
2704 #endif
2705 i915_report_and_clear_eir(dev);
2707 if (engine_mask) {
2708 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2709 &dev_priv->gpu_error.reset_counter);
2712 * Wakeup waiting processes so that the reset function
2713 * i915_reset_and_wakeup doesn't deadlock trying to grab
2714 * various locks. By bumping the reset counter first, the woken
2715 * processes will see a reset in progress and back off,
2716 * releasing their locks and then wait for the reset completion.
2717 * We must do this for _all_ gpu waiters that might hold locks
2718 * that the reset work needs to acquire.
2720 * Note: The wake_up serves as the required memory barrier to
2721 * ensure that the waiters see the updated value of the reset
2722 * counter atomic_t.
2724 i915_error_wake_up(dev_priv, false);
2727 i915_reset_and_wakeup(dev);
2730 /* Called from drm generic code, passed 'crtc' which
2731 * we use as a pipe index
2733 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2735 struct drm_i915_private *dev_priv = dev->dev_private;
2736 unsigned long irqflags;
2738 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2739 if (INTEL_INFO(dev)->gen >= 4)
2740 i915_enable_pipestat(dev_priv, pipe,
2741 PIPE_START_VBLANK_INTERRUPT_STATUS);
2742 else
2743 i915_enable_pipestat(dev_priv, pipe,
2744 PIPE_VBLANK_INTERRUPT_STATUS);
2745 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2747 return 0;
2750 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2752 struct drm_i915_private *dev_priv = dev->dev_private;
2753 unsigned long irqflags;
2754 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2755 DE_PIPE_VBLANK(pipe);
2757 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2758 ilk_enable_display_irq(dev_priv, bit);
2759 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2761 return 0;
2764 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2766 struct drm_i915_private *dev_priv = dev->dev_private;
2767 unsigned long irqflags;
2769 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2770 i915_enable_pipestat(dev_priv, pipe,
2771 PIPE_START_VBLANK_INTERRUPT_STATUS);
2772 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2774 return 0;
2777 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2779 struct drm_i915_private *dev_priv = dev->dev_private;
2780 unsigned long irqflags;
2782 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2783 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2784 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2786 return 0;
2789 /* Called from drm generic code, passed 'crtc' which
2790 * we use as a pipe index
2792 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2794 struct drm_i915_private *dev_priv = dev->dev_private;
2795 unsigned long irqflags;
2797 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2798 i915_disable_pipestat(dev_priv, pipe,
2799 PIPE_VBLANK_INTERRUPT_STATUS |
2800 PIPE_START_VBLANK_INTERRUPT_STATUS);
2801 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2804 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2806 struct drm_i915_private *dev_priv = dev->dev_private;
2807 unsigned long irqflags;
2808 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2809 DE_PIPE_VBLANK(pipe);
2811 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2812 ilk_disable_display_irq(dev_priv, bit);
2813 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2816 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2818 struct drm_i915_private *dev_priv = dev->dev_private;
2819 unsigned long irqflags;
2821 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2822 i915_disable_pipestat(dev_priv, pipe,
2823 PIPE_START_VBLANK_INTERRUPT_STATUS);
2824 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2827 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2829 struct drm_i915_private *dev_priv = dev->dev_private;
2830 unsigned long irqflags;
2832 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2833 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2834 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
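/*
 * Hangcheck helpers: an engine is considered idle once the seqno read from
 * the hardware has caught up with the last seqno submitted to that ring.
 */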
2837 static bool
2838 ring_idle(struct intel_engine_cs *engine, u32 seqno)
2840 return i915_seqno_passed(seqno,
2841 READ_ONCE(engine->last_submitted_seqno));
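/*
 * On gen8+ a semaphore wait is identified by bits 31:23 of IPEHR decoding
 * to the MI_SEMAPHORE_WAIT opcode (0x1c); on earlier gens the command must
 * match MI_SEMAPHORE_MBOX with the COMPARE and REGISTER flags set.
 */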
2844 static bool
2845 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2847 if (INTEL_INFO(dev)->gen >= 8) {
2848 return (ipehr >> 23) == 0x1c;
2849 } else {
2850 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2851 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2852 MI_SEMAPHORE_REGISTER);
2856 static struct intel_engine_cs *
2857 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2858 u64 offset)
2860 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2861 struct intel_engine_cs *signaller;
2863 if (INTEL_INFO(dev_priv)->gen >= 8) {
2864 for_each_engine(signaller, dev_priv) {
2865 if (engine == signaller)
2866 continue;
2868 if (offset == signaller->semaphore.signal_ggtt[engine->id])
2869 return signaller;
2871 } else {
2872 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2874 for_each_engine(signaller, dev_priv) {
2875 if (engine == signaller)
2876 continue;
2878 if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
2879 return signaller;
2883 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2884 engine->id, ipehr, offset);
2886 return NULL;
2889 static struct intel_engine_cs *
2890 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2892 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2893 u32 cmd, ipehr, head;
2894 u64 offset = 0;
2895 int i, backwards;
2898 * This function does not support execlist mode - any attempt to
2899 * proceed further into this function will result in a kernel panic
2900 * when dereferencing ring->buffer, which is not set up in execlist
2901 * mode.
2903 * The correct way of doing it would be to derive the currently
2904 * executing ring buffer from the current context, which is derived
2905 * from the currently running request. Unfortunately, to get the
2906 * current request we would have to grab the struct_mutex before doing
2907 * anything else, which would be ill-advised since some other thread
2908 * might have grabbed it already and managed to hang itself, causing
2909 * the hang checker to deadlock.
2911 * Therefore, this function does not support execlist mode in its
2912 * current form. Just return NULL and move on.
2914 if (engine->buffer == NULL)
2915 return NULL;
2917 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2918 if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
2919 return NULL;
2922 * HEAD is likely pointing to the dword after the actual command,
2923 * so scan backwards until we find the MBOX. But limit it to just 3
2924 * or 4 dwords depending on the semaphore wait command size.
2925 * Note that we don't care about ACTHD here since that might
2926 * point at a batch, and semaphores are always emitted into the
2927 * ringbuffer itself.
2929 head = I915_READ_HEAD(engine) & HEAD_ADDR;
2930 backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
2932 for (i = backwards; i; --i) {
2934 * Be paranoid and presume the hw has gone off into the wild -
2935 * our ring is smaller than what the hardware (and hence
2936 * HEAD_ADDR) allows. Also handles wrap-around.
2938 head &= engine->buffer->size - 1;
2940 /* This here seems to blow up */
2941 cmd = ioread32(engine->buffer->virtual_start + head);
2942 if (cmd == ipehr)
2943 break;
2945 head -= 4;
2948 if (!i)
2949 return NULL;
2951 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
2952 if (INTEL_INFO(engine->dev)->gen >= 8) {
2953 offset = ioread32(engine->buffer->virtual_start + head + 12);
2954 offset <<= 32;
2955 offset |= ioread32(engine->buffer->virtual_start + head + 8);
2957 return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
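/*
 * Returns 1 if the semaphore this engine is waiting on has already been
 * signalled, 0 if the signaller is still busy, and -1 if no signaller can
 * be identified or a semaphore deadlock is suspected.
 */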
2960 static int semaphore_passed(struct intel_engine_cs *engine)
2962 struct drm_i915_private *dev_priv = engine->dev->dev_private;
2963 struct intel_engine_cs *signaller;
2964 u32 seqno;
2966 engine->hangcheck.deadlock++;
2968 signaller = semaphore_waits_for(engine, &seqno);
2969 if (signaller == NULL)
2970 return -1;
2972 /* Prevent pathological recursion due to driver bugs */
2973 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2974 return -1;
2976 if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
2977 return 1;
2979 /* cursory check for an unkickable deadlock */
2980 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2981 semaphore_passed(signaller) < 0)
2982 return -1;
2984 return 0;
2987 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2989 struct intel_engine_cs *engine;
2991 for_each_engine(engine, dev_priv)
2992 engine->hangcheck.deadlock = 0;
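/*
 * Only the render ring exposes per-subunit INSTDONE state; all other
 * engines simply report "stuck" here so that head_stuck() falls through
 * to HANGCHECK_HUNG once ACTHD stops moving.
 */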
2995 static bool subunits_stuck(struct intel_engine_cs *engine)
2997 u32 instdone[I915_NUM_INSTDONE_REG];
2998 bool stuck;
2999 int i;
3001 if (engine->id != RCS)
3002 return true;
3004 i915_get_extra_instdone(engine->dev, instdone);
3006 /* There might be unstable subunit states even when
3007 * actual head is not moving. Filter out the unstable ones by
3008 * accumulating the undone -> done transitions and only
3009 * consider those as progress.
3011 stuck = true;
3012 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
3013 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
3015 if (tmp != engine->hangcheck.instdone[i])
3016 stuck = false;
3018 engine->hangcheck.instdone[i] |= tmp;
3021 return stuck;
3024 static enum intel_ring_hangcheck_action
3025 head_stuck(struct intel_engine_cs *engine, u64 acthd)
3027 if (acthd != engine->hangcheck.acthd) {
3029 /* Clear subunit states on head movement */
3030 memset(engine->hangcheck.instdone, 0,
3031 sizeof(engine->hangcheck.instdone));
3033 return HANGCHECK_ACTIVE;
3036 if (!subunits_stuck(engine))
3037 return HANGCHECK_ACTIVE;
3039 return HANGCHECK_HUNG;
3042 static enum intel_ring_hangcheck_action
3043 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3045 struct drm_device *dev = engine->dev;
3046 struct drm_i915_private *dev_priv = dev->dev_private;
3047 enum intel_ring_hangcheck_action ha;
3048 u32 tmp;
3050 ha = head_stuck(engine, acthd);
3051 if (ha != HANGCHECK_HUNG)
3052 return ha;
3054 if (IS_GEN2(dev))
3055 return HANGCHECK_HUNG;
3057 /* Is the chip hanging on a WAIT_FOR_EVENT?
3058 * If so we can simply poke the RB_WAIT bit
3059 * and break the hang. This should work on
3060 * all but the second generation chipsets.
3062 tmp = I915_READ_CTL(engine);
3063 if (tmp & RING_WAIT) {
3064 i915_handle_error(dev, 0,
3065 "Kicking stuck wait on %s",
3066 engine->name);
3067 I915_WRITE_CTL(engine, tmp);
3068 return HANGCHECK_KICK;
3071 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3072 switch (semaphore_passed(engine)) {
3073 default:
3074 return HANGCHECK_HUNG;
3075 case 1:
3076 i915_handle_error(dev, 0,
3077 "Kicking stuck semaphore on %s",
3078 engine->name);
3079 I915_WRITE_CTL(engine, tmp);
3080 return HANGCHECK_KICK;
3081 case 0:
3082 return HANGCHECK_WAIT;
3086 return HANGCHECK_HUNG;
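/*
 * If no new user interrupts have arrived since the last hangcheck while
 * someone is still sleeping on the engine's irq_queue, assume the irq went
 * missing and wake the waiters up by hand.
 */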
3089 static unsigned kick_waiters(struct intel_engine_cs *engine)
3091 struct drm_i915_private *i915 = to_i915(engine->dev);
3092 unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
3094 if (engine->hangcheck.user_interrupts == user_interrupts &&
3095 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
3096 if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
3097 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3098 engine->name);
3099 else
3100 DRM_INFO("Fake missed irq on %s\n",
3101 engine->name);
3102 wake_up_all(&engine->irq_queue);
3105 return user_interrupts;
3108 * This is called when the chip hasn't reported back with completed
3109 * batchbuffers in a long time. We keep track of per-ring seqno progress and
3110 * if there is no progress, the hangcheck score for that ring is increased.
3111 * Further, acthd is inspected to see if the ring is stuck. If it is,
3112 * we kick the ring. If we see no progress on three subsequent calls
3113 * we assume the chip is wedged and try to fix it by resetting the chip.
3115 static void i915_hangcheck_elapsed(struct work_struct *work)
3117 struct drm_i915_private *dev_priv =
3118 container_of(work, typeof(*dev_priv),
3119 gpu_error.hangcheck_work.work);
3120 struct drm_device *dev = dev_priv->dev;
3121 struct intel_engine_cs *engine;
3122 enum intel_engine_id id;
3123 int busy_count = 0, rings_hung = 0;
3124 bool stuck[I915_NUM_ENGINES] = { 0 };
3125 #define BUSY 1
3126 #define KICK 5
3127 #define HUNG 20
3128 #define ACTIVE_DECAY 15
3130 if (!i915.enable_hangcheck)
3131 return;
3134 * The hangcheck work is synced during runtime suspend, we don't
3135 * require a wakeref. TODO: instead of disabling the asserts make
3136 * sure that we hold a reference when this work is running.
3138 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3140 /* As enabling the GPU requires fairly extensive mmio access,
3141 * periodically arm the mmio checker to see if we are triggering
3142 * any invalid access.
3144 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3146 for_each_engine_id(engine, dev_priv, id) {
3147 u64 acthd;
3148 u32 seqno;
3149 unsigned user_interrupts;
3150 bool busy = true;
3152 semaphore_clear_deadlocks(dev_priv);
3154 /* We don't strictly need an irq-barrier here, as we are not
3155 * serving an interrupt request, but be paranoid in case the
3156 * barrier has side-effects (such as preventing a broken
3157 * cacheline snoop) and so be sure that we can see the seqno
3158 * advance. If the seqno should stick, due to a stale
3159 * cacheline, we would erroneously declare the GPU hung.
3161 if (engine->irq_seqno_barrier)
3162 engine->irq_seqno_barrier(engine);
3164 acthd = intel_ring_get_active_head(engine);
3165 seqno = engine->get_seqno(engine);
3167 /* Reset stuck interrupts between batch advances */
3168 user_interrupts = 0;
3170 if (engine->hangcheck.seqno == seqno) {
3171 if (ring_idle(engine, seqno)) {
3172 engine->hangcheck.action = HANGCHECK_IDLE;
3173 if (waitqueue_active(&engine->irq_queue)) {
3174 /* Safeguard against driver failure */
3175 user_interrupts = kick_waiters(engine);
3176 engine->hangcheck.score += BUSY;
3177 } else
3178 busy = false;
3179 } else {
3180 /* We always increment the hangcheck score
3181 * if the ring is busy and still processing
3182 * the same request, so that no single request
3183 * can run indefinitely (such as a chain of
3184 * batches). The only time we do not increment
3185 * the hangcheck score on this ring, if this
3186 * ring is in a legitimate wait for another
3187 * ring. In that case the waiting ring is a
3188 * victim and we want to be sure we catch the
3189 * right culprit. Then every time we do kick
3190 * the ring, add a small increment to the
3191 * score so that we can catch a batch that is
3192 * being repeatedly kicked and so responsible
3193 * for stalling the machine.
3195 engine->hangcheck.action = ring_stuck(engine,
3196 acthd);
3198 switch (engine->hangcheck.action) {
3199 case HANGCHECK_IDLE:
3200 case HANGCHECK_WAIT:
3201 break;
3202 case HANGCHECK_ACTIVE:
3203 engine->hangcheck.score += BUSY;
3204 break;
3205 case HANGCHECK_KICK:
3206 engine->hangcheck.score += KICK;
3207 break;
3208 case HANGCHECK_HUNG:
3209 engine->hangcheck.score += HUNG;
3210 stuck[id] = true;
3211 break;
3214 } else {
3215 engine->hangcheck.action = HANGCHECK_ACTIVE;
3217 /* Gradually reduce the count so that we catch DoS
3218 * attempts across multiple batches.
3220 if (engine->hangcheck.score > 0)
3221 engine->hangcheck.score -= ACTIVE_DECAY;
3222 if (engine->hangcheck.score < 0)
3223 engine->hangcheck.score = 0;
3225 /* Clear head and subunit states on seqno movement */
3226 acthd = 0;
3228 memset(engine->hangcheck.instdone, 0,
3229 sizeof(engine->hangcheck.instdone));
3232 engine->hangcheck.seqno = seqno;
3233 engine->hangcheck.acthd = acthd;
3234 engine->hangcheck.user_interrupts = user_interrupts;
3235 busy_count += busy;
3238 for_each_engine_id(engine, dev_priv, id) {
3239 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3240 DRM_INFO("%s on %s\n",
3241 stuck[id] ? "stuck" : "no progress",
3242 engine->name);
3243 rings_hung |= intel_engine_flag(engine);
3247 if (rings_hung) {
3248 i915_handle_error(dev, rings_hung, "Engine(s) hung");
3249 goto out;
3252 if (busy_count)
3253 /* Reset timer in case chip hangs without another request
3254 * being added */
3255 i915_queue_hangcheck(dev);
3257 out:
3258 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3261 void i915_queue_hangcheck(struct drm_device *dev)
3263 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3265 if (!i915.enable_hangcheck)
3266 return;
3268 /* Don't continually defer the hangcheck so that it is always run at
3269 * least once after work has been scheduled on any ring. Otherwise,
3270 * we will ignore a hung ring if a second ring is kept busy.
3273 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3274 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3277 static void ibx_irq_reset(struct drm_device *dev)
3279 struct drm_i915_private *dev_priv = dev->dev_private;
3281 if (HAS_PCH_NOP(dev))
3282 return;
3284 GEN5_IRQ_RESET(SDE);
3286 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3287 I915_WRITE(SERR_INT, 0xffffffff);
3291 * SDEIER is also touched by the interrupt handler to work around missed PCH
3292 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3293 * instead we unconditionally enable all PCH interrupt sources here, but then
3294 * only unmask them as needed with SDEIMR.
3296 * This function needs to be called before interrupts are enabled.
3298 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3300 struct drm_i915_private *dev_priv = dev->dev_private;
3302 if (HAS_PCH_NOP(dev))
3303 return;
3305 WARN_ON(I915_READ(SDEIER) != 0);
3306 I915_WRITE(SDEIER, 0xffffffff);
3307 POSTING_READ(SDEIER);
3310 static void gen5_gt_irq_reset(struct drm_device *dev)
3312 struct drm_i915_private *dev_priv = dev->dev_private;
3314 GEN5_IRQ_RESET(GT);
3315 if (INTEL_INFO(dev)->gen >= 6)
3316 GEN5_IRQ_RESET(GEN6_PM);
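/*
 * Reset all VLV/CHV display interrupt state: DPINVGTT status, hotplug
 * enables and status, every pipe's PIPESTAT, and finally the VLV_
 * IMR/IER/IIR triplet via GEN5_IRQ_RESET.
 */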
3319 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3321 enum i915_pipe pipe;
3323 if (IS_CHERRYVIEW(dev_priv))
3324 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3325 else
3326 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3328 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3329 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3331 for_each_pipe(dev_priv, pipe) {
3332 I915_WRITE(PIPESTAT(pipe),
3333 PIPE_FIFO_UNDERRUN_STATUS |
3334 PIPESTAT_INT_STATUS_MASK);
3335 dev_priv->pipestat_irq_mask[pipe] = 0;
3338 GEN5_IRQ_RESET(VLV_);
3339 dev_priv->irq_mask = ~0;
3342 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3344 u32 pipestat_mask;
3345 u32 enable_mask;
3346 enum i915_pipe pipe;
3348 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3349 PIPE_CRC_DONE_INTERRUPT_STATUS;
3351 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3352 for_each_pipe(dev_priv, pipe)
3353 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3355 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3356 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3357 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3358 if (IS_CHERRYVIEW(dev_priv))
3359 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3361 WARN_ON(dev_priv->irq_mask != ~0);
3363 dev_priv->irq_mask = ~enable_mask;
3365 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3368 /* drm_dma.h hooks
3370 static void ironlake_irq_reset(struct drm_device *dev)
3372 struct drm_i915_private *dev_priv = dev->dev_private;
3374 I915_WRITE(HWSTAM, 0xffffffff);
3376 GEN5_IRQ_RESET(DE);
3377 if (IS_GEN7(dev))
3378 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3380 gen5_gt_irq_reset(dev);
3382 ibx_irq_reset(dev);
3385 static void valleyview_irq_preinstall(struct drm_device *dev)
3387 struct drm_i915_private *dev_priv = dev->dev_private;
3389 I915_WRITE(VLV_MASTER_IER, 0);
3390 POSTING_READ(VLV_MASTER_IER);
3392 gen5_gt_irq_reset(dev);
3394 spin_lock_irq(&dev_priv->irq_lock);
3395 if (dev_priv->display_irqs_enabled)
3396 vlv_display_irq_reset(dev_priv);
3397 spin_unlock_irq(&dev_priv->irq_lock);
3400 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3402 GEN8_IRQ_RESET_NDX(GT, 0);
3403 GEN8_IRQ_RESET_NDX(GT, 1);
3404 GEN8_IRQ_RESET_NDX(GT, 2);
3405 GEN8_IRQ_RESET_NDX(GT, 3);
3408 static void gen8_irq_reset(struct drm_device *dev)
3410 struct drm_i915_private *dev_priv = dev->dev_private;
3411 int pipe;
3413 I915_WRITE(GEN8_MASTER_IRQ, 0);
3414 POSTING_READ(GEN8_MASTER_IRQ);
3416 gen8_gt_irq_reset(dev_priv);
3418 for_each_pipe(dev_priv, pipe)
3419 if (intel_display_power_is_enabled(dev_priv,
3420 POWER_DOMAIN_PIPE(pipe)))
3421 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3423 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3424 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3425 GEN5_IRQ_RESET(GEN8_PCU_);
3427 if (HAS_PCH_SPLIT(dev))
3428 ibx_irq_reset(dev);
3431 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3432 unsigned int pipe_mask)
3434 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3435 enum i915_pipe pipe;
3437 spin_lock_irq(&dev_priv->irq_lock);
3438 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3439 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3440 dev_priv->de_irq_mask[pipe],
3441 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3442 spin_unlock_irq(&dev_priv->irq_lock);
3445 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3446 unsigned int pipe_mask)
3448 enum i915_pipe pipe;
3450 spin_lock_irq(&dev_priv->irq_lock);
3451 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3452 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3453 spin_unlock_irq(&dev_priv->irq_lock);
3455 /* make sure we're done processing display irqs */
3456 synchronize_irq(dev_priv->dev->irq);
3459 static void cherryview_irq_preinstall(struct drm_device *dev)
3461 struct drm_i915_private *dev_priv = dev->dev_private;
3463 I915_WRITE(GEN8_MASTER_IRQ, 0);
3464 POSTING_READ(GEN8_MASTER_IRQ);
3466 gen8_gt_irq_reset(dev_priv);
3468 GEN5_IRQ_RESET(GEN8_PCU_);
3470 spin_lock_irq(&dev_priv->irq_lock);
3471 if (dev_priv->display_irqs_enabled)
3472 vlv_display_irq_reset(dev_priv);
3473 spin_unlock_irq(&dev_priv->irq_lock);
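/*
 * Build the mask of hotplug interrupt bits to enable by checking which HPD
 * pins currently have an encoder whose hotplug state is HPD_ENABLED.
 */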
3476 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3477 const u32 hpd[HPD_NUM_PINS])
3479 struct drm_i915_private *dev_priv = to_i915(dev);
3480 struct intel_encoder *encoder;
3481 u32 enabled_irqs = 0;
3483 for_each_intel_encoder(dev, encoder)
3484 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3485 enabled_irqs |= hpd[encoder->hpd_pin];
3487 return enabled_irqs;
3490 static void ibx_hpd_irq_setup(struct drm_device *dev)
3492 struct drm_i915_private *dev_priv = dev->dev_private;
3493 u32 hotplug_irqs, hotplug, enabled_irqs;
3495 if (HAS_PCH_IBX(dev)) {
3496 hotplug_irqs = SDE_HOTPLUG_MASK;
3497 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3498 } else {
3499 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3500 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3503 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3506 * Enable digital hotplug on the PCH, and configure the DP short pulse
3507 * duration to 2ms (which is the minimum in the Display Port spec).
3508 * The pulse duration bits are reserved on LPT+.
3510 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3511 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3512 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3513 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3514 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3516 * When CPU and PCH are on the same package, port A
3517 * HPD must be enabled in both north and south.
3519 if (HAS_PCH_LPT_LP(dev))
3520 hotplug |= PORTA_HOTPLUG_ENABLE;
3521 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3524 static void spt_hpd_irq_setup(struct drm_device *dev)
3526 struct drm_i915_private *dev_priv = dev->dev_private;
3527 u32 hotplug_irqs, hotplug, enabled_irqs;
3529 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3530 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3532 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3534 /* Enable digital hotplug on the PCH */
3535 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3536 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3537 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3538 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3540 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3541 hotplug |= PORTE_HOTPLUG_ENABLE;
3542 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3545 static void ilk_hpd_irq_setup(struct drm_device *dev)
3547 struct drm_i915_private *dev_priv = dev->dev_private;
3548 u32 hotplug_irqs, hotplug, enabled_irqs;
3550 if (INTEL_INFO(dev)->gen >= 8) {
3551 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3552 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3554 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3555 } else if (INTEL_INFO(dev)->gen >= 7) {
3556 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3557 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3559 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3560 } else {
3561 hotplug_irqs = DE_DP_A_HOTPLUG;
3562 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3564 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3568 * Enable digital hotplug on the CPU, and configure the DP short pulse
3569 * duration to 2ms (which is the minimum in the Display Port spec)
3570 * The pulse duration bits are reserved on HSW+.
3572 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3573 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3574 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3575 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3577 ibx_hpd_irq_setup(dev);
3580 static void bxt_hpd_irq_setup(struct drm_device *dev)
3582 struct drm_i915_private *dev_priv = dev->dev_private;
3583 u32 hotplug_irqs, hotplug, enabled_irqs;
3585 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3586 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3588 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3590 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3591 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3592 PORTA_HOTPLUG_ENABLE;
3594 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3595 hotplug, enabled_irqs);
3596 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3599 * For BXT the invert bit has to be set based on the AOB design
3600 * for the HPD detection logic; update it based on the VBT fields.
3603 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3604 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3605 hotplug |= BXT_DDIA_HPD_INVERT;
3606 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3607 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3608 hotplug |= BXT_DDIB_HPD_INVERT;
3609 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3610 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3611 hotplug |= BXT_DDIC_HPD_INVERT;
3613 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3616 static void ibx_irq_postinstall(struct drm_device *dev)
3618 struct drm_i915_private *dev_priv = dev->dev_private;
3619 u32 mask;
3621 if (HAS_PCH_NOP(dev))
3622 return;
3624 if (HAS_PCH_IBX(dev))
3625 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3626 else
3627 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3629 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3630 I915_WRITE(SDEIMR, ~mask);
3633 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3635 struct drm_i915_private *dev_priv = dev->dev_private;
3636 u32 pm_irqs, gt_irqs;
3638 pm_irqs = gt_irqs = 0;
3640 dev_priv->gt_irq_mask = ~0;
3641 if (HAS_L3_DPF(dev)) {
3642 /* L3 parity interrupt is always unmasked. */
3643 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3644 gt_irqs |= GT_PARITY_ERROR(dev);
3647 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3648 if (IS_GEN5(dev)) {
3649 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3650 ILK_BSD_USER_INTERRUPT;
3651 } else {
3652 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3655 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3657 if (INTEL_INFO(dev)->gen >= 6) {
3659 * RPS interrupts will get enabled/disabled on demand when RPS
3660 * itself is enabled/disabled.
3662 if (HAS_VEBOX(dev))
3663 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3665 dev_priv->pm_irq_mask = 0xffffffff;
3666 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3670 static int ironlake_irq_postinstall(struct drm_device *dev)
3672 struct drm_i915_private *dev_priv = dev->dev_private;
3673 u32 display_mask, extra_mask;
3675 if (INTEL_INFO(dev)->gen >= 7) {
3676 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3677 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3678 DE_PLANEB_FLIP_DONE_IVB |
3679 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3680 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3681 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3682 DE_DP_A_HOTPLUG_IVB);
3683 } else {
3684 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3685 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3686 DE_AUX_CHANNEL_A |
3687 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3688 DE_POISON);
3689 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3690 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3691 DE_DP_A_HOTPLUG);
3694 dev_priv->irq_mask = ~display_mask;
3696 I915_WRITE(HWSTAM, 0xeffe);
3698 ibx_irq_pre_postinstall(dev);
3700 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3702 gen5_gt_irq_postinstall(dev);
3704 ibx_irq_postinstall(dev);
3706 if (IS_IRONLAKE_M(dev)) {
3707 /* Enable PCU event interrupts
3709 * spinlocking not required here for correctness since interrupt
3710 * setup is guaranteed to run in single-threaded context. But we
3711 * need it to make the assert_spin_locked happy. */
3712 spin_lock_irq(&dev_priv->irq_lock);
3713 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3714 spin_unlock_irq(&dev_priv->irq_lock);
3717 return 0;
3720 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3722 assert_spin_locked(&dev_priv->irq_lock);
3724 if (dev_priv->display_irqs_enabled)
3725 return;
3727 dev_priv->display_irqs_enabled = true;
3729 if (intel_irqs_enabled(dev_priv)) {
3730 vlv_display_irq_reset(dev_priv);
3731 vlv_display_irq_postinstall(dev_priv);
3735 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3737 assert_spin_locked(&dev_priv->irq_lock);
3739 if (!dev_priv->display_irqs_enabled)
3740 return;
3742 dev_priv->display_irqs_enabled = false;
3744 if (intel_irqs_enabled(dev_priv))
3745 vlv_display_irq_reset(dev_priv);
3749 static int valleyview_irq_postinstall(struct drm_device *dev)
3751 struct drm_i915_private *dev_priv = dev->dev_private;
3753 gen5_gt_irq_postinstall(dev);
3755 spin_lock_irq(&dev_priv->irq_lock);
3756 if (dev_priv->display_irqs_enabled)
3757 vlv_display_irq_postinstall(dev_priv);
3758 spin_unlock_irq(&dev_priv->irq_lock);
3760 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3761 POSTING_READ(VLV_MASTER_IER);
3763 return 0;
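/*
 * gt_interrupts[] below holds one enable mask per GT interrupt bank
 * (0: RCS/BCS, 1: VCS1/VCS2, 2: PM, 3: VECS); bank 2 is left fully masked
 * here since RPS interrupts are enabled on demand.
 */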
3766 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3768 /* These are interrupts we'll toggle with the ring mask register */
3769 uint32_t gt_interrupts[] = {
3770 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3771 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3772 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3773 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3774 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3775 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3776 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3777 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3778 0,
3779 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3780 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3783 if (HAS_L3_DPF(dev_priv))
3784 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3786 dev_priv->pm_irq_mask = 0xffffffff;
3787 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3788 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3790 * RPS interrupts will get enabled/disabled on demand when RPS itself
3791 * is enabled/disabled.
3793 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3794 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3797 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3799 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3800 uint32_t de_pipe_enables;
3801 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3802 u32 de_port_enables;
3803 enum i915_pipe pipe;
3805 if (INTEL_INFO(dev_priv)->gen >= 9) {
3806 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3807 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3808 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3809 GEN9_AUX_CHANNEL_D;
3810 if (IS_BROXTON(dev_priv))
3811 de_port_masked |= BXT_DE_PORT_GMBUS;
3812 } else {
3813 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3814 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3817 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3818 GEN8_PIPE_FIFO_UNDERRUN;
3820 de_port_enables = de_port_masked;
3821 if (IS_BROXTON(dev_priv))
3822 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3823 else if (IS_BROADWELL(dev_priv))
3824 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3826 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3827 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3828 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3830 for_each_pipe(dev_priv, pipe)
3831 if (intel_display_power_is_enabled(dev_priv,
3832 POWER_DOMAIN_PIPE(pipe)))
3833 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3834 dev_priv->de_irq_mask[pipe],
3835 de_pipe_enables);
3837 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3840 static int gen8_irq_postinstall(struct drm_device *dev)
3842 struct drm_i915_private *dev_priv = dev->dev_private;
3844 if (HAS_PCH_SPLIT(dev))
3845 ibx_irq_pre_postinstall(dev);
3847 gen8_gt_irq_postinstall(dev_priv);
3848 gen8_de_irq_postinstall(dev_priv);
3850 if (HAS_PCH_SPLIT(dev))
3851 ibx_irq_postinstall(dev);
3853 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3854 POSTING_READ(GEN8_MASTER_IRQ);
3856 return 0;
3859 static int cherryview_irq_postinstall(struct drm_device *dev)
3861 struct drm_i915_private *dev_priv = dev->dev_private;
3863 gen8_gt_irq_postinstall(dev_priv);
3865 spin_lock_irq(&dev_priv->irq_lock);
3866 if (dev_priv->display_irqs_enabled)
3867 vlv_display_irq_postinstall(dev_priv);
3868 spin_unlock_irq(&dev_priv->irq_lock);
3870 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3871 POSTING_READ(GEN8_MASTER_IRQ);
3873 return 0;
3876 static void gen8_irq_uninstall(struct drm_device *dev)
3878 struct drm_i915_private *dev_priv = dev->dev_private;
3880 if (!dev_priv)
3881 return;
3883 gen8_irq_reset(dev);
3886 static void valleyview_irq_uninstall(struct drm_device *dev)
3888 struct drm_i915_private *dev_priv = dev->dev_private;
3890 if (!dev_priv)
3891 return;
3893 I915_WRITE(VLV_MASTER_IER, 0);
3894 POSTING_READ(VLV_MASTER_IER);
3896 gen5_gt_irq_reset(dev);
3898 I915_WRITE(HWSTAM, 0xffffffff);
3900 spin_lock_irq(&dev_priv->irq_lock);
3901 if (dev_priv->display_irqs_enabled)
3902 vlv_display_irq_reset(dev_priv);
3903 spin_unlock_irq(&dev_priv->irq_lock);
3906 static void cherryview_irq_uninstall(struct drm_device *dev)
3908 struct drm_i915_private *dev_priv = dev->dev_private;
3910 if (!dev_priv)
3911 return;
3913 I915_WRITE(GEN8_MASTER_IRQ, 0);
3914 POSTING_READ(GEN8_MASTER_IRQ);
3916 gen8_gt_irq_reset(dev_priv);
3918 GEN5_IRQ_RESET(GEN8_PCU_);
3920 spin_lock_irq(&dev_priv->irq_lock);
3921 if (dev_priv->display_irqs_enabled)
3922 vlv_display_irq_reset(dev_priv);
3923 spin_unlock_irq(&dev_priv->irq_lock);
3926 static void ironlake_irq_uninstall(struct drm_device *dev)
3928 struct drm_i915_private *dev_priv = dev->dev_private;
3930 if (!dev_priv)
3931 return;
3933 ironlake_irq_reset(dev);
3936 static void i8xx_irq_preinstall(struct drm_device * dev)
3938 struct drm_i915_private *dev_priv = dev->dev_private;
3939 int pipe;
3941 for_each_pipe(dev_priv, pipe)
3942 I915_WRITE(PIPESTAT(pipe), 0);
3943 I915_WRITE16(IMR, 0xffff);
3944 I915_WRITE16(IER, 0x0);
3945 POSTING_READ16(IER);
3948 static int i8xx_irq_postinstall(struct drm_device *dev)
3950 struct drm_i915_private *dev_priv = dev->dev_private;
3952 I915_WRITE16(EMR,
3953 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3955 /* Unmask the interrupts that we always want on. */
3956 dev_priv->irq_mask =
3957 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3958 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3959 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3960 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3961 I915_WRITE16(IMR, dev_priv->irq_mask);
3963 I915_WRITE16(IER,
3964 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3965 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3966 I915_USER_INTERRUPT);
3967 POSTING_READ16(IER);
3969 /* Interrupt setup is already guaranteed to be single-threaded; this is
3970 * just to make the assert_spin_locked check happy. */
3971 spin_lock_irq(&dev_priv->irq_lock);
3972 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3973 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3974 spin_unlock_irq(&dev_priv->irq_lock);
3976 return 0;
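/*
 * A compressed reminder of how the legacy registers programmed above fit
 * together, as I read the surrounding code (not an authoritative hardware
 * description):
 *
 *	IMR - bits set here are hidden from IIR entirely
 *	IER - bits set here may actually raise the interrupt line
 *	IIR - latched identity register, write-1-to-clear
 *
 * So the flip-pending bits are left unmasked in IMR (they show up in IIR
 * for the handler to inspect) but are not put into IER, so they never
 * generate an interrupt on their own.
 */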
3980 * Returns true when a page flip has completed.
3982 static bool i8xx_handle_vblank(struct drm_device *dev,
3983 int plane, int pipe, u32 iir)
3985 struct drm_i915_private *dev_priv = dev->dev_private;
3986 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3988 if (!intel_pipe_handle_vblank(dev, pipe))
3989 return false;
3991 if ((iir & flip_pending) == 0)
3992 goto check_page_flip;
3994 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3995 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3996 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3997 * the flip is completed (no longer pending). Since this doesn't raise
3998 * an interrupt per se, we watch for the change at vblank.
4000 if (I915_READ16(ISR) & flip_pending)
4001 goto check_page_flip;
4003 intel_prepare_page_flip(dev, plane);
4004 intel_finish_page_flip(dev, pipe);
4005 return true;
4007 check_page_flip:
4008 intel_check_page_flip(dev, pipe);
4009 return false;
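/*
 * Put differently (a restatement of the check above): IIR latches the
 * PendingFlip bit, while ISR reflects live status.  Seeing the bit set in
 * iir but clear in ISR at vblank time therefore means a flip was armed and
 * has since completed.
 */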
4012 static irqreturn_t i8xx_irq_handler(void *arg)
4014 struct drm_device *dev = arg;
4015 struct drm_i915_private *dev_priv = dev->dev_private;
4016 u16 iir, new_iir;
4017 u32 pipe_stats[2];
4018 int pipe;
4019 u16 flip_mask =
4020 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4021 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4023 if (!intel_irqs_enabled(dev_priv))
4024 return IRQ_NONE;
4026 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4027 disable_rpm_wakeref_asserts(dev_priv);
4029 iir = I915_READ16(IIR);
4030 if (iir == 0)
4031 goto out;
4033 while (iir & ~flip_mask) {
4034 /* Can't rely on pipestat interrupt bit in iir as it might
4035 * have been cleared after the pipestat interrupt was received.
4036 * It doesn't set the bit in iir again, but it still produces
4037 * interrupts (for non-MSI).
4039 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4040 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4041 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4043 for_each_pipe(dev_priv, pipe) {
4044 i915_reg_t reg = PIPESTAT(pipe);
4045 pipe_stats[pipe] = I915_READ(reg);
4048 * Clear the PIPE*STAT regs before the IIR
4050 if (pipe_stats[pipe] & 0x8000ffff)
4051 I915_WRITE(reg, pipe_stats[pipe]);
4053 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4055 I915_WRITE16(IIR, iir & ~flip_mask);
4056 new_iir = I915_READ16(IIR); /* Flush posted writes */
4058 if (iir & I915_USER_INTERRUPT)
4059 notify_ring(&dev_priv->engine[RCS]);
4061 for_each_pipe(dev_priv, pipe) {
4062 int plane = pipe;
4063 if (HAS_FBC(dev))
4064 plane = !plane;
4066 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4067 i8xx_handle_vblank(dev, plane, pipe, iir))
4068 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4070 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4071 i9xx_pipe_crc_irq_handler(dev, pipe);
4073 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4074 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4075 pipe);
4078 iir = new_iir;
4081 out:
4082 enable_rpm_wakeref_asserts(dev_priv);
4086 static void i8xx_irq_uninstall(struct drm_device * dev)
4088 struct drm_i915_private *dev_priv = dev->dev_private;
4089 int pipe;
4091 for_each_pipe(dev_priv, pipe) {
4092 /* Clear enable bits; then clear status bits */
4093 I915_WRITE(PIPESTAT(pipe), 0);
4094 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4096 I915_WRITE16(IMR, 0xffff);
4097 I915_WRITE16(IER, 0x0);
4098 I915_WRITE16(IIR, I915_READ16(IIR));
4101 static void i915_irq_preinstall(struct drm_device * dev)
4103 struct drm_i915_private *dev_priv = dev->dev_private;
4104 int pipe;
4106 if (I915_HAS_HOTPLUG(dev)) {
4107 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4108 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4111 I915_WRITE16(HWSTAM, 0xeffe);
4112 for_each_pipe(dev_priv, pipe)
4113 I915_WRITE(PIPESTAT(pipe), 0);
4114 I915_WRITE(IMR, 0xffffffff);
4115 I915_WRITE(IER, 0x0);
4116 POSTING_READ(IER);
4119 static int i915_irq_postinstall(struct drm_device *dev)
4121 struct drm_i915_private *dev_priv = dev->dev_private;
4122 u32 enable_mask;
4124 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4126 /* Unmask the interrupts that we always want on. */
4127 dev_priv->irq_mask =
4128 ~(I915_ASLE_INTERRUPT |
4129 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4130 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4131 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4132 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4134 enable_mask =
4135 I915_ASLE_INTERRUPT |
4136 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4137 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4138 I915_USER_INTERRUPT;
4140 if (I915_HAS_HOTPLUG(dev)) {
4141 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4142 POSTING_READ(PORT_HOTPLUG_EN);
4144 /* Enable in IER... */
4145 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4146 /* and unmask in IMR */
4147 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4150 I915_WRITE(IMR, dev_priv->irq_mask);
4151 I915_WRITE(IER, enable_mask);
4152 POSTING_READ(IER);
4154 i915_enable_asle_pipestat(dev);
4156 /* Interrupt setup is already guaranteed to be single-threaded; this is
4157 * just to make the assert_spin_locked check happy. */
4158 spin_lock_irq(&dev_priv->irq_lock);
4159 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4160 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4161 spin_unlock_irq(&dev_priv->irq_lock);
4163 return 0;
4167 * Returns true when a page flip has completed.
4169 static bool i915_handle_vblank(struct drm_device *dev,
4170 int plane, int pipe, u32 iir)
4172 struct drm_i915_private *dev_priv = dev->dev_private;
4173 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4175 if (!intel_pipe_handle_vblank(dev, pipe))
4176 return false;
4178 if ((iir & flip_pending) == 0)
4179 goto check_page_flip;
4181 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4182 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4183 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4184 * the flip is completed (no longer pending). Since this doesn't raise
4185 * an interrupt per se, we watch for the change at vblank.
4187 if (I915_READ(ISR) & flip_pending)
4188 goto check_page_flip;
4190 intel_prepare_page_flip(dev, plane);
4191 intel_finish_page_flip(dev, pipe);
4192 return true;
4194 check_page_flip:
4195 intel_check_page_flip(dev, pipe);
4196 return false;
4199 static irqreturn_t i915_irq_handler(void *arg)
4201 struct drm_device *dev = arg;
4202 struct drm_i915_private *dev_priv = dev->dev_private;
4203 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4204 u32 flip_mask =
4205 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4206 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4207 int pipe;
4209 if (!intel_irqs_enabled(dev_priv))
4210 return IRQ_NONE;
4212 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4213 disable_rpm_wakeref_asserts(dev_priv);
4215 iir = I915_READ(IIR);
4216 do {
4217 bool irq_received = (iir & ~flip_mask) != 0;
4218 bool blc_event = false;
4220 /* Can't rely on pipestat interrupt bit in iir as it might
4221 * have been cleared after the pipestat interrupt was received.
4222 * It doesn't set the bit in iir again, but it still produces
4223 * interrupts (for non-MSI).
4225 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4226 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4227 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4229 for_each_pipe(dev_priv, pipe) {
4230 i915_reg_t reg = PIPESTAT(pipe);
4231 pipe_stats[pipe] = I915_READ(reg);
4233 /* Clear the PIPE*STAT regs before the IIR */
4234 if (pipe_stats[pipe] & 0x8000ffff) {
4235 I915_WRITE(reg, pipe_stats[pipe]);
4236 irq_received = true;
4239 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4241 if (!irq_received)
4242 break;
4244 /* Consume port. Then clear IIR or we'll miss events */
4245 if (I915_HAS_HOTPLUG(dev) &&
4246 iir & I915_DISPLAY_PORT_INTERRUPT) {
4247 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4248 if (hotplug_status)
4249 i9xx_hpd_irq_handler(dev, hotplug_status);
4252 I915_WRITE(IIR, iir & ~flip_mask);
4253 new_iir = I915_READ(IIR); /* Flush posted writes */
4255 if (iir & I915_USER_INTERRUPT)
4256 notify_ring(&dev_priv->engine[RCS]);
4258 for_each_pipe(dev_priv, pipe) {
4259 int plane = pipe;
4260 if (HAS_FBC(dev))
4261 plane = !plane;
4263 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4264 i915_handle_vblank(dev, plane, pipe, iir))
4265 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4267 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4268 blc_event = true;
4270 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4271 i9xx_pipe_crc_irq_handler(dev, pipe);
4273 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4274 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4275 pipe);
4278 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4279 intel_opregion_asle_intr(dev);
4281 /* With MSI, interrupts are only generated when iir
4282 * transitions from zero to nonzero. If another bit got
4283 * set while we were handling the existing iir bits, then
4284 * we would never get another interrupt.
4286 * This is fine on non-MSI as well, as if we hit this path
4287 * we avoid exiting the interrupt handler only to generate
4288 * another one.
4290 * Note that for MSI this could cause a stray interrupt report
4291 * if an interrupt landed in the time between writing IIR and
4292 * the posting read. This should be rare enough to never
4293 * trigger the 99% of 100,000 interrupts test for disabling
4294 * stray interrupts.
4296 iir = new_iir;
4297 } while (iir & ~flip_mask);
4299 enable_rpm_wakeref_asserts(dev_priv);
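/*
 * A compressed sketch of the IIR re-read pattern the MSI comment above
 * describes (illustrative only; the real loop also acks PIPESTAT, honours
 * flip_mask and handles the hotplug port):
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		I915_WRITE(IIR, iir);		// ack what we are about to handle
 *		new_iir = I915_READ(IIR);	// flush, and pick up late arrivals
 *		...handle the bits recorded in iir...
 *		iir = new_iir;
 *	} while (iir);
 */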
4303 static void i915_irq_uninstall(struct drm_device * dev)
4305 struct drm_i915_private *dev_priv = dev->dev_private;
4306 int pipe;
4308 if (I915_HAS_HOTPLUG(dev)) {
4309 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4310 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4313 I915_WRITE16(HWSTAM, 0xffff);
4314 for_each_pipe(dev_priv, pipe) {
4315 /* Clear enable bits; then clear status bits */
4316 I915_WRITE(PIPESTAT(pipe), 0);
4317 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4319 I915_WRITE(IMR, 0xffffffff);
4320 I915_WRITE(IER, 0x0);
4322 I915_WRITE(IIR, I915_READ(IIR));
4325 static void i965_irq_preinstall(struct drm_device * dev)
4327 struct drm_i915_private *dev_priv = dev->dev_private;
4328 int pipe;
4330 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4331 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4333 I915_WRITE(HWSTAM, 0xeffe);
4334 for_each_pipe(dev_priv, pipe)
4335 I915_WRITE(PIPESTAT(pipe), 0);
4336 I915_WRITE(IMR, 0xffffffff);
4337 I915_WRITE(IER, 0x0);
4338 POSTING_READ(IER);
4341 static int i965_irq_postinstall(struct drm_device *dev)
4343 struct drm_i915_private *dev_priv = dev->dev_private;
4344 u32 enable_mask;
4345 u32 error_mask;
4347 /* Unmask the interrupts that we always want on. */
4348 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4349 I915_DISPLAY_PORT_INTERRUPT |
4350 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4351 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4352 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4353 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4354 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4356 enable_mask = ~dev_priv->irq_mask;
4357 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4358 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4359 enable_mask |= I915_USER_INTERRUPT;
4361 if (IS_G4X(dev))
4362 enable_mask |= I915_BSD_USER_INTERRUPT;
4364 /* Interrupt setup is already guaranteed to be single-threaded; this is
4365 * just to make the assert_spin_locked check happy. */
4366 spin_lock_irq(&dev_priv->irq_lock);
4367 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4368 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4369 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4370 spin_unlock_irq(&dev_priv->irq_lock);
4373 * Enable some error detection; note that the instruction error mask
4374 * bit is reserved, so we leave it masked.
4376 if (IS_G4X(dev)) {
4377 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4378 GM45_ERROR_MEM_PRIV |
4379 GM45_ERROR_CP_PRIV |
4380 I915_ERROR_MEMORY_REFRESH);
4381 } else {
4382 error_mask = ~(I915_ERROR_PAGE_TABLE |
4383 I915_ERROR_MEMORY_REFRESH);
4385 I915_WRITE(EMR, error_mask);
4387 I915_WRITE(IMR, dev_priv->irq_mask);
4388 I915_WRITE(IER, enable_mask);
4389 POSTING_READ(IER);
4391 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4392 POSTING_READ(PORT_HOTPLUG_EN);
4394 i915_enable_asle_pipestat(dev);
4396 return 0;
4399 static void i915_hpd_irq_setup(struct drm_device *dev)
4401 struct drm_i915_private *dev_priv = dev->dev_private;
4402 u32 hotplug_en;
4404 assert_spin_locked(&dev_priv->irq_lock);
4406 /* Note HDMI and DP share hotplug bits */
4407 /* enable bits are the same for all generations */
4408 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4409 /* Programming the CRT detection parameters tends
4410 to generate a spurious hotplug event about three
4411 seconds later. So just do it once.
4413 if (IS_G4X(dev))
4414 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4415 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4417 /* Ignore TV since it's buggy */
4418 i915_hotplug_interrupt_update_locked(dev_priv,
4419 HOTPLUG_INT_EN_MASK |
4420 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4421 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4422 hotplug_en);
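/*
 * i915_hotplug_interrupt_update_locked(dev_priv, mask, bits) is defined
 * earlier in this file; it is essentially a read-modify-write of
 * PORT_HOTPLUG_EN (sketch):
 *
 *	val = I915_READ(PORT_HOTPLUG_EN);
 *	val &= ~mask;
 *	val |= bits;
 *	I915_WRITE(PORT_HOTPLUG_EN, val);
 *
 * so the call above only rewrites the enable, voltage-compare and
 * activation-period fields and leaves everything else untouched.
 */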
4425 static irqreturn_t i965_irq_handler(void *arg)
4427 struct drm_device *dev = arg;
4428 struct drm_i915_private *dev_priv = dev->dev_private;
4429 u32 iir, new_iir;
4430 u32 pipe_stats[I915_MAX_PIPES];
4431 int pipe;
4432 u32 flip_mask =
4433 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4434 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4436 if (!intel_irqs_enabled(dev_priv))
4437 return IRQ_NONE;
4439 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
4440 disable_rpm_wakeref_asserts(dev_priv);
4442 iir = I915_READ(IIR);
4444 for (;;) {
4445 bool irq_received = (iir & ~flip_mask) != 0;
4446 bool blc_event = false;
4448 /* Can't rely on pipestat interrupt bit in iir as it might
4449 * have been cleared after the pipestat interrupt was received.
4450 * It doesn't set the bit in iir again, but it still produces
4451 * interrupts (for non-MSI).
4453 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4454 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4455 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4457 for_each_pipe(dev_priv, pipe) {
4458 i915_reg_t reg = PIPESTAT(pipe);
4459 pipe_stats[pipe] = I915_READ(reg);
4462 * Clear the PIPE*STAT regs before the IIR
4464 if (pipe_stats[pipe] & 0x8000ffff) {
4465 I915_WRITE(reg, pipe_stats[pipe]);
4466 irq_received = true;
4469 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4471 if (!irq_received)
4472 break;
4475 /* Consume port. Then clear IIR or we'll miss events */
4476 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4477 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4478 if (hotplug_status)
4479 i9xx_hpd_irq_handler(dev, hotplug_status);
4482 I915_WRITE(IIR, iir & ~flip_mask);
4483 new_iir = I915_READ(IIR); /* Flush posted writes */
4485 if (iir & I915_USER_INTERRUPT)
4486 notify_ring(&dev_priv->engine[RCS]);
4487 if (iir & I915_BSD_USER_INTERRUPT)
4488 notify_ring(&dev_priv->engine[VCS]);
4490 for_each_pipe(dev_priv, pipe) {
4491 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4492 i915_handle_vblank(dev, pipe, pipe, iir))
4493 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4495 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4496 blc_event = true;
4498 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4499 i9xx_pipe_crc_irq_handler(dev, pipe);
4501 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4502 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4505 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4506 intel_opregion_asle_intr(dev);
4508 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4509 gmbus_irq_handler(dev);
4511 /* With MSI, interrupts are only generated when iir
4512 * transitions from zero to nonzero. If another bit got
4513 * set while we were handling the existing iir bits, then
4514 * we would never get another interrupt.
4516 * This is fine on non-MSI as well, as if we hit this path
4517 * we avoid exiting the interrupt handler only to generate
4518 * another one.
4520 * Note that for MSI this could cause a stray interrupt report
4521 * if an interrupt landed in the time between writing IIR and
4522 * the posting read. This should be rare enough to never
4523 * trigger the 99% of 100,000 interrupts test for disabling
4524 * stray interrupts.
4526 iir = new_iir;
4529 enable_rpm_wakeref_asserts(dev_priv);
4533 static void i965_irq_uninstall(struct drm_device * dev)
4535 struct drm_i915_private *dev_priv = dev->dev_private;
4536 int pipe;
4538 if (!dev_priv)
4539 return;
4541 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4542 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4544 I915_WRITE(HWSTAM, 0xffffffff);
4545 for_each_pipe(dev_priv, pipe)
4546 I915_WRITE(PIPESTAT(pipe), 0);
4547 I915_WRITE(IMR, 0xffffffff);
4548 I915_WRITE(IER, 0x0);
4550 for_each_pipe(dev_priv, pipe)
4551 I915_WRITE(PIPESTAT(pipe),
4552 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4553 I915_WRITE(IIR, I915_READ(IIR));
4557 * intel_irq_init - initializes irq support
4558 * @dev_priv: i915 device instance
4560 * This function initializes all the irq support including work items, timers
4561 * and all the vtables. It does not set up the interrupt itself, though.
4563 void intel_irq_init(struct drm_i915_private *dev_priv)
4565 struct drm_device *dev = dev_priv->dev;
4567 intel_hpd_init_work(dev_priv);
4569 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4570 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4572 /* Let's track the enabled rps events */
4573 if (IS_VALLEYVIEW(dev_priv))
4574 /* WaGsvRC0ResidencyMethod:vlv */
4575 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4576 else
4577 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4579 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4580 i915_hangcheck_elapsed);
4582 if (IS_GEN2(dev_priv)) {
4583 dev->max_vblank_count = 0;
4584 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4585 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4586 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4587 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4588 } else {
4589 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4590 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4594 * Opt out of the vblank disable timer on everything except gen2.
4595 * Gen2 doesn't have a hardware frame counter and so depends on
4596 * vblank interrupts to produce sane vblank sequence numbers.
4598 if (!IS_GEN2(dev_priv))
4599 dev->vblank_disable_immediate = true;
4601 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4602 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4604 if (IS_CHERRYVIEW(dev_priv)) {
4605 dev->driver->irq_handler = cherryview_irq_handler;
4606 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4607 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4608 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4609 dev->driver->enable_vblank = valleyview_enable_vblank;
4610 dev->driver->disable_vblank = valleyview_disable_vblank;
4611 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4612 } else if (IS_VALLEYVIEW(dev_priv)) {
4613 dev->driver->irq_handler = valleyview_irq_handler;
4614 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4615 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4616 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4617 dev->driver->enable_vblank = valleyview_enable_vblank;
4618 dev->driver->disable_vblank = valleyview_disable_vblank;
4619 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4620 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4621 dev->driver->irq_handler = gen8_irq_handler;
4622 dev->driver->irq_preinstall = gen8_irq_reset;
4623 dev->driver->irq_postinstall = gen8_irq_postinstall;
4624 dev->driver->irq_uninstall = gen8_irq_uninstall;
4625 dev->driver->enable_vblank = gen8_enable_vblank;
4626 dev->driver->disable_vblank = gen8_disable_vblank;
4627 if (IS_BROXTON(dev))
4628 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4629 else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
4630 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4631 else
4632 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4633 } else if (HAS_PCH_SPLIT(dev)) {
4634 dev->driver->irq_handler = ironlake_irq_handler;
4635 dev->driver->irq_preinstall = ironlake_irq_reset;
4636 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4637 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4638 dev->driver->enable_vblank = ironlake_enable_vblank;
4639 dev->driver->disable_vblank = ironlake_disable_vblank;
4640 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4641 } else {
4642 if (INTEL_INFO(dev_priv)->gen == 2) {
4643 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4644 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4645 dev->driver->irq_handler = i8xx_irq_handler;
4646 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4647 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4648 dev->driver->irq_preinstall = i915_irq_preinstall;
4649 dev->driver->irq_postinstall = i915_irq_postinstall;
4650 dev->driver->irq_uninstall = i915_irq_uninstall;
4651 dev->driver->irq_handler = i915_irq_handler;
4652 } else {
4653 dev->driver->irq_preinstall = i965_irq_preinstall;
4654 dev->driver->irq_postinstall = i965_irq_postinstall;
4655 dev->driver->irq_uninstall = i965_irq_uninstall;
4656 dev->driver->irq_handler = i965_irq_handler;
4658 if (I915_HAS_HOTPLUG(dev_priv))
4659 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4660 dev->driver->enable_vblank = i915_enable_vblank;
4661 dev->driver->disable_vblank = i915_disable_vblank;
4666 * intel_irq_install - enables the hardware interrupt
4667 * @dev_priv: i915 device instance
4669 * This function enables the hardware interrupt handling, but leaves the hotplug
4670 * handling disabled. It is called after intel_irq_init().
4672 * In the driver load and resume code we need working interrupts in a few places
4673 * but don't want to deal with the hassle of concurrent probe and hotplug
4674 * workers. Hence the split into this two-stage approach.
4676 int intel_irq_install(struct drm_i915_private *dev_priv)
4679 * We enable some interrupt sources in our postinstall hooks, so mark
4680 * interrupts as enabled _before_ actually enabling them to avoid
4681 * special cases in our ordering checks.
4683 dev_priv->pm.irqs_enabled = true;
4685 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
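/*
 * A sketch of the intended driver-load ordering described above (arguments
 * approximated; the real call sites live in the load/resume paths, not in
 * this file):
 *
 *	intel_irq_init(dev_priv);	// vtables, work items, timers
 *	intel_irq_install(dev_priv);	// request the IRQ, run postinstall
 *	...probe outputs...
 *	intel_hpd_init(dev_priv);	// only now enable hotplug handling
 */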
4689 * intel_irq_uninstall - finalizes all irq handling
4690 * @dev_priv: i915 device instance
4692 * This stops interrupt and hotplug handling and unregisters and frees all
4693 * resources acquired in the init functions.
4695 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4697 drm_irq_uninstall(dev_priv->dev);
4698 intel_hpd_cancel_work(dev_priv);
4699 dev_priv->pm.irqs_enabled = false;
4703 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4704 * @dev_priv: i915 device instance
4706 * This function is used to disable interrupts at runtime, both in the runtime
4707 * pm and the system suspend/resume code.
4709 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4711 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4712 dev_priv->pm.irqs_enabled = false;
4713 synchronize_irq(dev_priv->dev->irq);
4717 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4718 * @dev_priv: i915 device instance
4720 * This function is used to enable interrupts at runtime, both in the runtime
4721 * pm and the system suspend/resume code.
4723 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4725 dev_priv->pm.irqs_enabled = true;
4726 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4727 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
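/*
 * Typical pairing in the runtime-PM and system suspend/resume paths
 * (a sketch; the real callers live in the PM code, not in this file):
 *
 *	// going to sleep
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	...power down the device...
 *
 *	// waking up
 *	...power the device back up...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 */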