/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/device.h>
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc_helper.h>
static struct drm_driver driver;

#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }

	.color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }

	.color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
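
/*
 * The macros above seed the intel_device_info entries below with per-pipe
 * register layout: pipe/transcoder/palette MMIO offsets (Cherryview uses
 * its own PIPE_C/TRANSCODER_C/PALETTE_C locations), cursor register
 * offsets, and the degamma/gamma LUT sizes consumed by the color
 * management code.
 */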
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
#define GEN7_FEATURES \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	GEN_DEFAULT_PIPEOFFSETS, \

static const struct intel_device_info intel_ivybridge_d_info = {
};

static const struct intel_device_info intel_ivybridge_m_info = {
};

static const struct intel_device_info intel_ivybridge_q_info = {
	.num_pipes = 0, /* legal, last one wins */
};

#define VLV_FEATURES \
	.gen = 7, .num_pipes = 2, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.display_mmio_offset = VLV_DISPLAY_BASE, \
	GEN_DEFAULT_PIPEOFFSETS, \

static const struct intel_device_info intel_valleyview_m_info = {
};

static const struct intel_device_info intel_valleyview_d_info = {
};

#define HSW_FEATURES \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \

static const struct intel_device_info intel_haswell_d_info = {
};

static const struct intel_device_info intel_haswell_m_info = {
};

#define BDW_FEATURES \

static const struct intel_device_info intel_broadwell_d_info = {
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.display_mmio_offset = VLV_DISPLAY_BASE,
};

static const struct intel_device_info intel_skylake_info = {
};

static const struct intel_device_info intel_skylake_gt3_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broxton_info = {
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_kabylake_info = {
};

static const struct intel_device_info intel_kabylake_gt3_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_coffeelake_info = {
};

static const struct intel_device_info intel_coffeelake_gt3_info = {
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};
/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
static const struct pci_device_id pciidlist[] = {
	INTEL_I830_IDS(&intel_i830_info),
	INTEL_I845G_IDS(&intel_845g_info),
	INTEL_I85X_IDS(&intel_i85x_info),
	INTEL_I865G_IDS(&intel_i865g_info),
	INTEL_I915G_IDS(&intel_i915g_info),
	INTEL_I915GM_IDS(&intel_i915gm_info),
	INTEL_I945G_IDS(&intel_i945g_info),
	INTEL_I945GM_IDS(&intel_i945gm_info),
	INTEL_I965G_IDS(&intel_i965g_info),
	INTEL_G33_IDS(&intel_g33_info),
	INTEL_I965GM_IDS(&intel_i965gm_info),
	INTEL_GM45_IDS(&intel_gm45_info),
	INTEL_G45_IDS(&intel_g45_info),
	INTEL_PINEVIEW_IDS(&intel_pineview_info),
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
	INTEL_HSW_D_IDS(&intel_haswell_d_info),
	INTEL_HSW_M_IDS(&intel_haswell_m_info),
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
	INTEL_CHV_IDS(&intel_cherryview_info),
	INTEL_SKL_GT1_IDS(&intel_skylake_info),
	INTEL_SKL_GT2_IDS(&intel_skylake_info),
	INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
	INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
	INTEL_BXT_IDS(&intel_broxton_info),
	INTEL_KBL_GT1_IDS(&intel_kabylake_info),
	INTEL_KBL_GT2_IDS(&intel_kabylake_info),
	INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
	INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
	INTEL_CFL_S_IDS(&intel_coffeelake_info),
	INTEL_CFL_H_IDS(&intel_coffeelake_info),
	INTEL_CFL_U_IDS(&intel_coffeelake_gt3_info),
};
#define PCI_VENDOR_INTEL 0x8086

static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */
	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_devinfo *di;

	/*
	 * In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/* XXX The ISA bridge probe causes some old Core2 machines to hang */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) {
		if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) {
			unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
				WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
			} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
			} else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_SPT;
				DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
				WARN_ON(!IS_SKYLAKE(dev) &&
			} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_KBP;
				DRM_DEBUG_KMS("Found KabyPoint PCH\n");
				WARN_ON(!IS_KABYLAKE(dev));
			} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
				   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
				   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
				dev_priv->pch_type = intel_virt_detect_pch(dev);
			}
		}
	}
		DRM_DEBUG_KMS("No PCH found.\n");
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	/* Until we get further testing... */

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

#define IS_BUILTIN(blah) 0
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}

static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
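
/*
 * i915_drm_suspend() below is the first half of the S3/S4 path: it blocks
 * lid events, quiesces GEM, GuC and GT power management, tears down the
 * display pipeline and its interrupts, saves GTT mappings and register
 * state, and finally tells the opregion which power state the adapter is
 * headed for.
 */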
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	pci_power_t opregion_target_state;

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	disable_rpm_wakeref_asserts(dev_priv);

	/* We do a lot of poking in a lot of registers, make sure they work */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	error = i915_gem_suspend(dev);
			"GEM idle failed, resume might fail\n");

	intel_guc_suspend(dev);

	intel_suspend_gt_powersave(dev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev);

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);
}
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = drm_dev->dev_private;

	disable_rpm_wakeref_asserts(dev_priv);

	fw_csr = !IS_BROXTON(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
		intel_power_domains_suspend(dev_priv);

	if (IS_BROXTON(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

		DRM_ERROR("Suspend complete failed: %d\n", ret);

		intel_power_domains_init_hw(dev_priv, true);

	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

	enable_rpm_wakeref_asserts(dev_priv);
}
int i915_suspend_switcheroo(device_t kdev)
{
	struct drm_device *dev = device_get_softc(kdev);

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);

	return i915_drm_suspend_late(dev, false);
}
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	disable_rpm_wakeref_asserts(dev_priv);

	ret = i915_ggtt_enable_hw(dev);
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);
	mutex_unlock(&dev->struct_mutex);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);
}
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(dev->pdev, PCI_D0);
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(dev->pdev)) {

	pci_set_master(dev->pdev);

	disable_rpm_wakeref_asserts(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",

	intel_uncore_early_sanitize(dev, true);

	if (IS_BROXTON(dev)) {
		if (!dev_priv->suspended_to_idle)
			gen9_sanitize_dc_state(dev_priv);
		bxt_disable_dc9(dev_priv);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		hsw_disable_pc8(dev_priv);
	}

	intel_uncore_sanitize(dev);

	if (IS_BROXTON(dev_priv) ||
	    !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
		intel_power_domains_init_hw(dev_priv, true);

	enable_rpm_wakeref_asserts(dev_priv);

	dev_priv->suspended_to_idle = false;
}

int i915_resume_switcheroo(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(dev);

	return i915_drm_resume(dev);
}
/* XXX Hack for the old *BSD drm code base
 * The device id field is set at probe time */
static drm_pci_id_list_t i915_attach_list[] = {
	{0x8086, 0, 0, "Intel i915 GPU"},
};

struct intel_device_info *
i915_get_device_id(int device)
{
	const struct pci_device_id *did;

	for (did = &pciidlist[0]; did->device != 0; did++) {
		if (did->device != device)
			continue;
		return (struct intel_device_info *)did->driver_data;
	}
	return NULL;
}

static int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx,
			    struct sysctl_oid *top)
{
	return drm_add_busid_modesetting(dev, ctx, top);
}

extern devclass_t drm_devclass;
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 * - reset the chip using the reset reg
 * - re-init context state
 * - re-init hardware status page
 * - re-init ring buffer
 * - re-init interrupt state
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gpu_error *error = &dev_priv->gpu_error;
	unsigned reset_counter;

	intel_reset_gt_powersave(dev);

	mutex_lock(&dev->struct_mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	atomic_andnot(I915_WEDGED, &error->reset_counter);

	/* Clear the reset-in-progress flag and increment the reset epoch. */
	reset_counter = atomic_inc_return(&error->reset_counter);
	if (WARN_ON(__i915_reset_in_progress(reset_counter))) {

	i915_gem_reset(dev);

	ret = intel_gpu_reset(dev, ALL_ENGINES);

	/* Also reset the gpu hangman. */
	if (error->stop_rings != 0) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		error->stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
		}
	}

	if (i915_stop_ring_allow_warn(dev_priv))
		pr_notice("drm/i915: Resetting chip after gpu hang\n");

		DRM_ERROR("Failed to reset chip: %i\n", ret);

		DRM_DEBUG_DRIVER("GPU reset disabled\n");

	intel_overlay_reset(dev_priv);

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = i915_gem_init_hw(dev);
		DRM_ERROR("Failed hw init on reset %d\n", ret);

	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	if (INTEL_INFO(dev)->gen > 5)
		intel_enable_gt_powersave(dev);

	return 0;

error:
	atomic_or(I915_WEDGED, &error->reset_counter);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
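
/*
 * Newbus probe glue: accept only Intel display-class PCI devices whose
 * device id appears in pciidlist, and remember that id in i915_attach_list
 * so the attach path and device-info lookups can find the matching entry.
 */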
1088 static int i915_pci_probe(device_t kdev
)
1092 if (pci_get_class(kdev
) != PCIC_DISPLAY
)
1095 if (pci_get_vendor(kdev
) != PCI_VENDOR_INTEL
)
1098 device
= pci_get_device(kdev
);
1100 for (i
= 0; pciidlist
[i
].device
!= 0; i
++) {
1101 if (pciidlist
[i
].device
== device
) {
1102 i915_attach_list
[0].device
= device
;
1112 i915_pci_remove(struct pci_dev
*pdev
)
1114 struct drm_device
*dev
= pci_get_drvdata(pdev
);
1119 static int i915_pm_suspend(struct device
*dev
)
1121 struct pci_dev
*pdev
= to_pci_dev(dev
);
1122 struct drm_device
*drm_dev
= pci_get_drvdata(pdev
);
1124 if (!drm_dev
|| !drm_dev
->dev_private
) {
1125 dev_err(dev
, "DRM not initialized, aborting suspend.\n");
1129 if (drm_dev
->switch_power_state
== DRM_SWITCH_POWER_OFF
)
1132 return i915_drm_suspend(drm_dev
);
1135 static int i915_pm_suspend_late(struct device
*dev
)
1137 struct drm_device
*drm_dev
= dev_to_i915(dev
)->dev
;
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
1145 * FIXME: This should be solved with a special hdmi sink device or
1146 * similar so that power domains can be employed.
1148 if (drm_dev
->switch_power_state
== DRM_SWITCH_POWER_OFF
)
1151 return i915_drm_suspend_late(drm_dev
, false);
1154 static int i915_pm_poweroff_late(struct device
*dev
)
1156 struct drm_device
*drm_dev
= dev_to_i915(dev
)->dev
;
1158 if (drm_dev
->switch_power_state
== DRM_SWITCH_POWER_OFF
)
1161 return i915_drm_suspend_late(drm_dev
, true);
1164 static int i915_pm_resume_early(struct device
*dev
)
1166 struct drm_device
*drm_dev
= dev_to_i915(dev
)->dev
;
1168 if (drm_dev
->switch_power_state
== DRM_SWITCH_POWER_OFF
)
1171 return i915_drm_resume_early(drm_dev
);
1174 static int i915_pm_resume(struct device
*dev
)
1176 struct drm_device
*drm_dev
= dev_to_i915(dev
)->dev
;
1178 if (drm_dev
->switch_power_state
== DRM_SWITCH_POWER_OFF
)
1181 return i915_drm_resume(drm_dev
);
1186 * Save all Gunit registers that may be lost after a D3 and a subsequent
1187 * S0i[R123] transition. The list of registers needing a save/restore is
1188 * defined in the VLV2_S0IXRegs document. This documents marks all Gunit
1189 * registers in the following way:
1190 * - Driver: saved/restored by the driver
1191 * - Punit : saved/restored by the Punit firmware
1192 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
1195 * - Debug : used for debugging
1197 * We save/restore all registers marked with 'Driver', with the following
1199 * - Registers out of use, including also registers marked with 'Debug'.
1200 * These have no effect on the driver's operation, so we don't save/restore
1201 * them to reduce the overhead.
1202 * - Registers that are fully setup by an initialization function called from
1203 * the resume path. For example many clock gating and RPS/RC6 registers.
1204 * - Registers that provide the right functionality with their reset defaults.
1206 * TODO: Except for registers that based on the above 3 criteria can be safely
1207 * ignored, we save/restore all others, practically treating the HW context as
1208 * a black-box for the driver. Further investigation is needed to reduce the
1209 * saved/restored registers even further, by following the same 3 criteria.
1211 static void vlv_save_gunit_s0ix_state(struct drm_i915_private
*dev_priv
)
1213 struct vlv_s0ix_state
*s
= &dev_priv
->vlv_s0ix_state
;
1216 /* GAM 0x4000-0x4770 */
1217 s
->wr_watermark
= I915_READ(GEN7_WR_WATERMARK
);
1218 s
->gfx_prio_ctrl
= I915_READ(GEN7_GFX_PRIO_CTRL
);
1219 s
->arb_mode
= I915_READ(ARB_MODE
);
1220 s
->gfx_pend_tlb0
= I915_READ(GEN7_GFX_PEND_TLB0
);
1221 s
->gfx_pend_tlb1
= I915_READ(GEN7_GFX_PEND_TLB1
);
1223 for (i
= 0; i
< ARRAY_SIZE(s
->lra_limits
); i
++)
1224 s
->lra_limits
[i
] = I915_READ(GEN7_LRA_LIMITS(i
));
1226 s
->media_max_req_count
= I915_READ(GEN7_MEDIA_MAX_REQ_COUNT
);
1227 s
->gfx_max_req_count
= I915_READ(GEN7_GFX_MAX_REQ_COUNT
);
1229 s
->render_hwsp
= I915_READ(RENDER_HWS_PGA_GEN7
);
1230 s
->ecochk
= I915_READ(GAM_ECOCHK
);
1231 s
->bsd_hwsp
= I915_READ(BSD_HWS_PGA_GEN7
);
1232 s
->blt_hwsp
= I915_READ(BLT_HWS_PGA_GEN7
);
1234 s
->tlb_rd_addr
= I915_READ(GEN7_TLB_RD_ADDR
);
1236 /* MBC 0x9024-0x91D0, 0x8500 */
1237 s
->g3dctl
= I915_READ(VLV_G3DCTL
);
1238 s
->gsckgctl
= I915_READ(VLV_GSCKGCTL
);
1239 s
->mbctl
= I915_READ(GEN6_MBCTL
);
1241 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1242 s
->ucgctl1
= I915_READ(GEN6_UCGCTL1
);
1243 s
->ucgctl3
= I915_READ(GEN6_UCGCTL3
);
1244 s
->rcgctl1
= I915_READ(GEN6_RCGCTL1
);
1245 s
->rcgctl2
= I915_READ(GEN6_RCGCTL2
);
1246 s
->rstctl
= I915_READ(GEN6_RSTCTL
);
1247 s
->misccpctl
= I915_READ(GEN7_MISCCPCTL
);
1249 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1250 s
->gfxpause
= I915_READ(GEN6_GFXPAUSE
);
1251 s
->rpdeuhwtc
= I915_READ(GEN6_RPDEUHWTC
);
1252 s
->rpdeuc
= I915_READ(GEN6_RPDEUC
);
1253 s
->ecobus
= I915_READ(ECOBUS
);
1254 s
->pwrdwnupctl
= I915_READ(VLV_PWRDWNUPCTL
);
1255 s
->rp_down_timeout
= I915_READ(GEN6_RP_DOWN_TIMEOUT
);
1256 s
->rp_deucsw
= I915_READ(GEN6_RPDEUCSW
);
1257 s
->rcubmabdtmr
= I915_READ(GEN6_RCUBMABDTMR
);
1258 s
->rcedata
= I915_READ(VLV_RCEDATA
);
1259 s
->spare2gh
= I915_READ(VLV_SPAREG2H
);
1261 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1262 s
->gt_imr
= I915_READ(GTIMR
);
1263 s
->gt_ier
= I915_READ(GTIER
);
1264 s
->pm_imr
= I915_READ(GEN6_PMIMR
);
1265 s
->pm_ier
= I915_READ(GEN6_PMIER
);
1267 for (i
= 0; i
< ARRAY_SIZE(s
->gt_scratch
); i
++)
1268 s
->gt_scratch
[i
] = I915_READ(GEN7_GT_SCRATCH(i
));
1270 /* GT SA CZ domain, 0x100000-0x138124 */
1271 s
->tilectl
= I915_READ(TILECTL
);
1272 s
->gt_fifoctl
= I915_READ(GTFIFOCTL
);
1273 s
->gtlc_wake_ctrl
= I915_READ(VLV_GTLC_WAKE_CTRL
);
1274 s
->gtlc_survive
= I915_READ(VLV_GTLC_SURVIVABILITY_REG
);
1275 s
->pmwgicz
= I915_READ(VLV_PMWGICZ
);
1277 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1278 s
->gu_ctl0
= I915_READ(VLV_GU_CTL0
);
1279 s
->gu_ctl1
= I915_READ(VLV_GU_CTL1
);
1280 s
->pcbr
= I915_READ(VLV_PCBR
);
1281 s
->clock_gate_dis2
= I915_READ(VLV_GUNIT_CLOCK_GATE2
);
1284 * Not saving any of:
1285 * DFT, 0x9800-0x9EC0
1286 * SARB, 0xB000-0xB1FC
1287 * GAC, 0x5208-0x524C, 0x14000-0x14C000
1292 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private
*dev_priv
)
1294 struct vlv_s0ix_state
*s
= &dev_priv
->vlv_s0ix_state
;
1298 /* GAM 0x4000-0x4770 */
1299 I915_WRITE(GEN7_WR_WATERMARK
, s
->wr_watermark
);
1300 I915_WRITE(GEN7_GFX_PRIO_CTRL
, s
->gfx_prio_ctrl
);
1301 I915_WRITE(ARB_MODE
, s
->arb_mode
| (0xffff << 16));
1302 I915_WRITE(GEN7_GFX_PEND_TLB0
, s
->gfx_pend_tlb0
);
1303 I915_WRITE(GEN7_GFX_PEND_TLB1
, s
->gfx_pend_tlb1
);
1305 for (i
= 0; i
< ARRAY_SIZE(s
->lra_limits
); i
++)
1306 I915_WRITE(GEN7_LRA_LIMITS(i
), s
->lra_limits
[i
]);
1308 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT
, s
->media_max_req_count
);
1309 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT
, s
->gfx_max_req_count
);
1311 I915_WRITE(RENDER_HWS_PGA_GEN7
, s
->render_hwsp
);
1312 I915_WRITE(GAM_ECOCHK
, s
->ecochk
);
1313 I915_WRITE(BSD_HWS_PGA_GEN7
, s
->bsd_hwsp
);
1314 I915_WRITE(BLT_HWS_PGA_GEN7
, s
->blt_hwsp
);
1316 I915_WRITE(GEN7_TLB_RD_ADDR
, s
->tlb_rd_addr
);
1318 /* MBC 0x9024-0x91D0, 0x8500 */
1319 I915_WRITE(VLV_G3DCTL
, s
->g3dctl
);
1320 I915_WRITE(VLV_GSCKGCTL
, s
->gsckgctl
);
1321 I915_WRITE(GEN6_MBCTL
, s
->mbctl
);
1323 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1324 I915_WRITE(GEN6_UCGCTL1
, s
->ucgctl1
);
1325 I915_WRITE(GEN6_UCGCTL3
, s
->ucgctl3
);
1326 I915_WRITE(GEN6_RCGCTL1
, s
->rcgctl1
);
1327 I915_WRITE(GEN6_RCGCTL2
, s
->rcgctl2
);
1328 I915_WRITE(GEN6_RSTCTL
, s
->rstctl
);
1329 I915_WRITE(GEN7_MISCCPCTL
, s
->misccpctl
);
1331 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1332 I915_WRITE(GEN6_GFXPAUSE
, s
->gfxpause
);
1333 I915_WRITE(GEN6_RPDEUHWTC
, s
->rpdeuhwtc
);
1334 I915_WRITE(GEN6_RPDEUC
, s
->rpdeuc
);
1335 I915_WRITE(ECOBUS
, s
->ecobus
);
1336 I915_WRITE(VLV_PWRDWNUPCTL
, s
->pwrdwnupctl
);
1337 I915_WRITE(GEN6_RP_DOWN_TIMEOUT
,s
->rp_down_timeout
);
1338 I915_WRITE(GEN6_RPDEUCSW
, s
->rp_deucsw
);
1339 I915_WRITE(GEN6_RCUBMABDTMR
, s
->rcubmabdtmr
);
1340 I915_WRITE(VLV_RCEDATA
, s
->rcedata
);
1341 I915_WRITE(VLV_SPAREG2H
, s
->spare2gh
);
1343 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1344 I915_WRITE(GTIMR
, s
->gt_imr
);
1345 I915_WRITE(GTIER
, s
->gt_ier
);
1346 I915_WRITE(GEN6_PMIMR
, s
->pm_imr
);
1347 I915_WRITE(GEN6_PMIER
, s
->pm_ier
);
1349 for (i
= 0; i
< ARRAY_SIZE(s
->gt_scratch
); i
++)
1350 I915_WRITE(GEN7_GT_SCRATCH(i
), s
->gt_scratch
[i
]);
1352 /* GT SA CZ domain, 0x100000-0x138124 */
1353 I915_WRITE(TILECTL
, s
->tilectl
);
1354 I915_WRITE(GTFIFOCTL
, s
->gt_fifoctl
);
	 * Preserve the GT allow wake and GFX force clock bit; they are not
	 * restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
1360 val
= I915_READ(VLV_GTLC_WAKE_CTRL
);
1361 val
&= VLV_GTLC_ALLOWWAKEREQ
;
1362 val
|= s
->gtlc_wake_ctrl
& ~VLV_GTLC_ALLOWWAKEREQ
;
1363 I915_WRITE(VLV_GTLC_WAKE_CTRL
, val
);
1365 val
= I915_READ(VLV_GTLC_SURVIVABILITY_REG
);
1366 val
&= VLV_GFX_CLK_FORCE_ON_BIT
;
1367 val
|= s
->gtlc_survive
& ~VLV_GFX_CLK_FORCE_ON_BIT
;
1368 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG
, val
);
1370 I915_WRITE(VLV_PMWGICZ
, s
->pmwgicz
);
1372 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1373 I915_WRITE(VLV_GU_CTL0
, s
->gu_ctl0
);
1374 I915_WRITE(VLV_GU_CTL1
, s
->gu_ctl1
);
1375 I915_WRITE(VLV_PCBR
, s
->pcbr
);
1376 I915_WRITE(VLV_GUNIT_CLOCK_GATE2
, s
->clock_gate_dis2
);
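
/*
 * Force the GFX clock on or off via VLV_GTLC_SURVIVABILITY_REG; when
 * forcing it on, wait up to 20 ms for VLV_GFX_CLK_STATUS_BIT to assert.
 */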
1379 int vlv_force_gfx_clock(struct drm_i915_private
*dev_priv
, bool force_on
)
1384 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1386 val
= I915_READ(VLV_GTLC_SURVIVABILITY_REG
);
1387 val
&= ~VLV_GFX_CLK_FORCE_ON_BIT
;
1389 val
|= VLV_GFX_CLK_FORCE_ON_BIT
;
1390 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG
, val
);
1395 err
= wait_for(COND
, 20);
1397 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1398 I915_READ(VLV_GTLC_SURVIVABILITY_REG
));
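
/*
 * Allow or disallow GT wake requests by toggling VLV_GTLC_ALLOWWAKEREQ and
 * waiting briefly for the matching VLV_GTLC_ALLOWWAKEACK acknowledgement.
 */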
1404 static int vlv_allow_gt_wake(struct drm_i915_private
*dev_priv
, bool allow
)
1409 val
= I915_READ(VLV_GTLC_WAKE_CTRL
);
1410 val
&= ~VLV_GTLC_ALLOWWAKEREQ
;
1412 val
|= VLV_GTLC_ALLOWWAKEREQ
;
1413 I915_WRITE(VLV_GTLC_WAKE_CTRL
, val
);
1414 POSTING_READ(VLV_GTLC_WAKE_CTRL
);
1416 #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1418 err
= wait_for(COND
, 1);
1420 DRM_ERROR("timeout disabling GT waking\n");
1425 static int vlv_wait_for_gt_wells(struct drm_i915_private
*dev_priv
,
1432 mask
= VLV_GTLC_PW_MEDIA_STATUS_MASK
| VLV_GTLC_PW_RENDER_STATUS_MASK
;
1433 val
= wait_for_on
? mask
: 0;
1434 #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1438 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1440 I915_READ(VLV_GTLC_PW_STATUS
));
1443 * RC6 transitioning can be delayed up to 2 msec (see
1444 * valleyview_enable_rps), use 3 msec for safety.
1446 err
= wait_for(COND
, 3);
1448 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1449 onoff(wait_for_on
));
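
/*
 * Report (as a debug message) any GT register access that happened while
 * GT waking was disallowed, then write VLV_GTLC_ALLOWWAKEERR back, which
 * should clear the sticky error indication.
 */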
1455 static void vlv_check_no_gt_access(struct drm_i915_private
*dev_priv
)
1457 if (!(I915_READ(VLV_GTLC_PW_STATUS
) & VLV_GTLC_ALLOWWAKEERR
))
1460 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
1461 I915_WRITE(VLV_GTLC_PW_STATUS
, VLV_GTLC_ALLOWWAKEERR
);
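
/*
 * vlv_suspend_complete(): with the GT wells expected to be off, force the
 * GFX clock on, disallow GT wake requests, save the Gunit state (skipped
 * on Cherryview) and then release the clock force. On failure, waking is
 * re-allowed and the clock force dropped for safety.
 */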
1464 static int vlv_suspend_complete(struct drm_i915_private
*dev_priv
)
1470 * Bspec defines the following GT well on flags as debug only, so
1471 * don't treat them as hard failures.
1473 (void)vlv_wait_for_gt_wells(dev_priv
, false);
1475 mask
= VLV_GTLC_RENDER_CTX_EXISTS
| VLV_GTLC_MEDIA_CTX_EXISTS
;
1476 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL
) & mask
) != mask
);
1478 vlv_check_no_gt_access(dev_priv
);
1480 err
= vlv_force_gfx_clock(dev_priv
, true);
1484 err
= vlv_allow_gt_wake(dev_priv
, false);
1488 if (!IS_CHERRYVIEW(dev_priv
))
1489 vlv_save_gunit_s0ix_state(dev_priv
);
1491 err
= vlv_force_gfx_clock(dev_priv
, false);
1498 /* For safety always re-enable waking and disable gfx clock forcing */
1499 vlv_allow_gt_wake(dev_priv
, true);
1501 vlv_force_gfx_clock(dev_priv
, false);
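
/*
 * vlv_resume_prepare(): the mirror image of vlv_suspend_complete() - force
 * the GFX clock on, restore the saved Gunit state (again skipped on
 * Cherryview), re-allow GT wake requests, drop the clock force, then
 * re-init clock gating and restore the GEM fence registers.
 */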
1506 static int vlv_resume_prepare(struct drm_i915_private
*dev_priv
,
1509 struct drm_device
*dev
= dev_priv
->dev
;
1514 * If any of the steps fail just try to continue, that's the best we
1515 * can do at this point. Return the first error code (which will also
1516 * leave RPM permanently disabled).
1518 ret
= vlv_force_gfx_clock(dev_priv
, true);
1520 if (!IS_CHERRYVIEW(dev_priv
))
1521 vlv_restore_gunit_s0ix_state(dev_priv
);
1523 err
= vlv_allow_gt_wake(dev_priv
, true);
1527 err
= vlv_force_gfx_clock(dev_priv
, false);
1531 vlv_check_no_gt_access(dev_priv
);
1534 intel_init_clock_gating(dev
);
1535 i915_gem_restore_fences(dev
);
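
/*
 * Runtime (D3) suspend, invoked through i915_pm_ops.runtime_suspend once
 * the device has been idle long enough. Note the trylock on struct_mutex
 * below: contention is resolved by returning -EAGAIN so the suspend gets
 * rescheduled instead of deadlocking.
 */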
1542 static int intel_runtime_suspend(struct device
*device
)
1544 struct pci_dev
*pdev
= to_pci_dev(device
);
1545 struct drm_device
*dev
= pci_get_drvdata(pdev
);
1546 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1549 if (WARN_ON_ONCE(!(dev_priv
->rps
.enabled
&& intel_enable_rc6(dev
))))
1552 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev
)))
1555 DRM_DEBUG_KMS("Suspending device\n");
1558 * We could deadlock here in case another thread holding struct_mutex
1559 * calls RPM suspend concurrently, since the RPM suspend will wait
1560 * first for this RPM suspend to finish. In this case the concurrent
1561 * RPM resume will be followed by its RPM suspend counterpart. Still
1562 * for consistency return -EAGAIN, which will reschedule this suspend.
1564 if (!mutex_trylock(&dev
->struct_mutex
)) {
1565 DRM_DEBUG_KMS("device lock contention, deffering suspend\n");
1567 * Bump the expiration timestamp, otherwise the suspend won't
1570 pm_runtime_mark_last_busy(device
);
1575 disable_rpm_wakeref_asserts(dev_priv
);
1578 * We are safe here against re-faults, since the fault handler takes
1581 i915_gem_release_all_mmaps(dev_priv
);
1582 mutex_unlock(&dev
->struct_mutex
);
1584 cancel_delayed_work_sync(&dev_priv
->gpu_error
.hangcheck_work
);
1586 intel_guc_suspend(dev
);
1588 intel_suspend_gt_powersave(dev
);
1589 intel_runtime_pm_disable_interrupts(dev_priv
);
1592 if (IS_BROXTON(dev_priv
)) {
1593 bxt_display_core_uninit(dev_priv
);
1594 bxt_enable_dc9(dev_priv
);
1595 } else if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
)) {
1596 hsw_enable_pc8(dev_priv
);
1597 } else if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) {
1598 ret
= vlv_suspend_complete(dev_priv
);
1602 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret
);
1603 intel_runtime_pm_enable_interrupts(dev_priv
);
1605 enable_rpm_wakeref_asserts(dev_priv
);
1610 intel_uncore_forcewake_reset(dev
, false);
1612 enable_rpm_wakeref_asserts(dev_priv
);
1613 WARN_ON_ONCE(atomic_read(&dev_priv
->pm
.wakeref_count
));
1615 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv
))
1616 DRM_ERROR("Unclaimed access detected prior to suspending\n");
1618 dev_priv
->pm
.suspended
= true;
1621 * FIXME: We really should find a document that references the arguments
1624 if (IS_BROADWELL(dev
)) {
1626 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1627 * being detected, and the call we do at intel_runtime_resume()
1628 * won't be able to restore them. Since PCI_D3hot matches the
1629 * actual specification and appears to be working, use it.
1631 intel_opregion_notify_adapter(dev
, PCI_D3hot
);
1634 * current versions of firmware which depend on this opregion
1635 * notification have repurposed the D1 definition to mean
1636 * "runtime suspended" vs. what you would normally expect (D3)
1637 * to distinguish it from notifications that might be sent via
1640 intel_opregion_notify_adapter(dev
, PCI_D1
);
1643 assert_forcewakes_inactive(dev_priv
);
1645 if (!IS_VALLEYVIEW(dev_priv
) || !IS_CHERRYVIEW(dev_priv
))
1646 intel_hpd_poll_init(dev_priv
);
1648 DRM_DEBUG_KMS("Device suspended\n");
1652 static int intel_runtime_resume(struct device
*device
)
1654 struct pci_dev
*pdev
= to_pci_dev(device
);
1655 struct drm_device
*dev
= pci_get_drvdata(pdev
);
1656 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1659 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev
)))
1662 DRM_DEBUG_KMS("Resuming device\n");
1664 WARN_ON_ONCE(atomic_read(&dev_priv
->pm
.wakeref_count
));
1665 disable_rpm_wakeref_asserts(dev_priv
);
1667 intel_opregion_notify_adapter(dev
, PCI_D0
);
1668 dev_priv
->pm
.suspended
= false;
1669 if (intel_uncore_unclaimed_mmio(dev_priv
))
1670 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
1672 intel_guc_resume(dev
);
1674 if (IS_GEN6(dev_priv
))
1675 intel_init_pch_refclk(dev
);
1677 if (IS_BROXTON(dev
)) {
1678 bxt_disable_dc9(dev_priv
);
1679 bxt_display_core_init(dev_priv
, true);
1680 if (dev_priv
->csr
.dmc_payload
&&
1681 (dev_priv
->csr
.allowed_dc_mask
& DC_STATE_EN_UPTO_DC5
))
1682 gen9_enable_dc5(dev_priv
);
1683 } else if (IS_HASWELL(dev_priv
) || IS_BROADWELL(dev_priv
)) {
1684 hsw_disable_pc8(dev_priv
);
1685 } else if (IS_VALLEYVIEW(dev_priv
) || IS_CHERRYVIEW(dev_priv
)) {
1686 ret
= vlv_resume_prepare(dev_priv
, true);
	 * No point in rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
1693 i915_gem_init_swizzling(dev
);
1694 gen6_update_ring_freq(dev
);
1696 intel_runtime_pm_enable_interrupts(dev_priv
);
1699 * On VLV/CHV display interrupts are part of the display
1700 * power well, so hpd is reinitialized from there. For
1701 * everyone else do it here.
1703 if (!IS_VALLEYVIEW(dev_priv
) && !IS_CHERRYVIEW(dev_priv
))
1704 intel_hpd_init(dev_priv
);
1706 intel_enable_gt_powersave(dev
);
1708 enable_rpm_wakeref_asserts(dev_priv
);
1711 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret
);
1713 DRM_DEBUG_KMS("Device resumed\n");
1718 static const struct dev_pm_ops i915_pm_ops
= {
1720 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1723 .suspend
= i915_pm_suspend
,
1724 .suspend_late
= i915_pm_suspend_late
,
1725 .resume_early
= i915_pm_resume_early
,
1726 .resume
= i915_pm_resume
,
1730 * @freeze, @freeze_late : called (1) before creating the
1731 * hibernation image [PMSG_FREEZE] and
1732 * (2) after rebooting, before restoring
1733 * the image [PMSG_QUIESCE]
1734 * @thaw, @thaw_early : called (1) after creating the hibernation
1735 * image, before writing it [PMSG_THAW]
1736 * and (2) after failing to create or
1737 * restore the image [PMSG_RECOVER]
1738 * @poweroff, @poweroff_late: called after writing the hibernation
1739 * image, before rebooting [PMSG_HIBERNATE]
1740 * @restore, @restore_early : called after rebooting and restoring the
1741 * hibernation image [PMSG_RESTORE]
1743 .freeze
= i915_pm_suspend
,
1744 .freeze_late
= i915_pm_suspend_late
,
1745 .thaw_early
= i915_pm_resume_early
,
1746 .thaw
= i915_pm_resume
,
1747 .poweroff
= i915_pm_suspend
,
1748 .poweroff_late
= i915_pm_poweroff_late
,
1749 .restore_early
= i915_pm_resume_early
,
1750 .restore
= i915_pm_resume
,
1752 /* S0ix (via runtime suspend) event handlers */
1753 .runtime_suspend
= intel_runtime_suspend
,
1754 .runtime_resume
= intel_runtime_resume
,
1757 static const struct vm_operations_struct i915_gem_vm_ops
= {
1758 .fault
= i915_gem_fault
,
1759 .open
= drm_gem_vm_open
,
1760 .close
= drm_gem_vm_close
,
1763 static const struct file_operations i915_driver_fops
= {
1764 .owner
= THIS_MODULE
,
1766 .release
= drm_release
,
1767 .unlocked_ioctl
= drm_ioctl
,
1768 .mmap
= drm_gem_mmap
,
1771 #ifdef CONFIG_COMPAT
1772 .compat_ioctl
= i915_compat_ioctl
,
1774 .llseek
= noop_llseek
,
1778 static struct cdev_pager_ops i915_gem_vm_ops
= {
1779 .cdev_pg_fault
= i915_gem_fault
,
1780 .cdev_pg_ctor
= i915_gem_pager_ctor
,
1781 .cdev_pg_dtor
= i915_gem_pager_dtor
1784 static struct drm_driver driver
= {
1785 /* Don't use MTRRs here; the Xserver or userspace app should
1786 * deal with them for Intel hardware.
1789 DRIVER_HAVE_IRQ
| DRIVER_IRQ_SHARED
| DRIVER_GEM
|
1790 DRIVER_RENDER
| DRIVER_MODESET
,
1791 .load
= i915_driver_load
,
1792 .unload
= i915_driver_unload
,
1793 .open
= i915_driver_open
,
1794 .lastclose
= i915_driver_lastclose
,
1795 .preclose
= i915_driver_preclose
,
1796 .postclose
= i915_driver_postclose
,
1798 #if defined(CONFIG_DEBUG_FS)
1799 .debugfs_init
= i915_debugfs_init
,
1800 .debugfs_cleanup
= i915_debugfs_cleanup
,
1802 .gem_free_object
= i915_gem_free_object
,
1803 .gem_pager_ops
= &i915_gem_vm_ops
,
1805 .dumb_create
= i915_gem_dumb_create
,
1806 .dumb_map_offset
= i915_gem_mmap_gtt
,
1807 .dumb_destroy
= drm_gem_dumb_destroy
,
1808 .ioctls
= i915_ioctls
,
1809 .sysctl_init
= i915_sysctl_init
,
1810 .name
= DRIVER_NAME
,
1811 .desc
= DRIVER_DESC
,
1812 .date
= DRIVER_DATE
,
1813 .major
= DRIVER_MAJOR
,
1814 .minor
= DRIVER_MINOR
,
1815 .patchlevel
= DRIVER_PATCHLEVEL
,
1818 static int __init
i915_init(void);
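
/*
 * Newbus attach: point the softc's drm_device at our drm_driver, hand off
 * to drm_attach() with i915_attach_list, then sleep ~2 seconds so the
 * kernel console can settle before X starts poking at the hardware.
 */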
1821 i915_attach(device_t kdev
)
1823 struct drm_device
*dev
= device_get_softc(kdev
);
1829 dev
->driver
= &driver
;
1830 error
= drm_attach(kdev
, i915_attach_list
);
1833 * XXX hack - give the kvm_console time to come up before X starts
1834 * messing with everything, avoiding at least one deadlock.
1836 tsleep(&dummy
, 0, "i915_attach", hz
*2);
1841 static device_method_t i915_methods
[] = {
1842 /* Device interface */
1843 DEVMETHOD(device_probe
, i915_pci_probe
),
1844 DEVMETHOD(device_attach
, i915_attach
),
1845 DEVMETHOD(device_suspend
, i915_suspend_switcheroo
),
1846 DEVMETHOD(device_resume
, i915_resume_switcheroo
),
1847 DEVMETHOD(device_detach
, drm_release
),
1851 static driver_t i915_driver
= {
1854 sizeof(struct drm_device
)
1857 static int __init
i915_init(void)
1859 driver
.num_ioctls
= i915_max_ioctl
;
	 * Enable KMS by default, unless explicitly overridden by
	 * either the i915.modeset parameter or by the
	 * vga_text_mode_force boot option.
1867 if (i915
.modeset
== 0)
1868 driver
.driver_features
&= ~DRIVER_MODESET
;
1870 if (vgacon_text_force() && i915
.modeset
== -1)
1871 driver
.driver_features
&= ~DRIVER_MODESET
;
1873 if (!(driver
.driver_features
& DRIVER_MODESET
)) {
1874 /* Silently fail loading to not upset userspace. */
1875 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1879 if (i915
.nuclear_pageflip
)
1880 driver
.driver_features
|= DRIVER_ATOMIC
;
1883 return drm_pci_init(&driver
, &i915_pci_driver
);
1890 static void __exit
i915_exit(void)
1892 if (!(driver
.driver_features
& DRIVER_MODESET
))
1893 return; /* Never loaded a driver. */
1895 drm_pci_exit(&driver
, &i915_pci_driver
);
1899 DRIVER_MODULE_ORDERED(i915
, vgapci
, i915_driver
, drm_devclass
, NULL
, NULL
, SI_ORDER_ANY
);
1900 MODULE_DEPEND(i915
, drm
, 1, 1, 1);
1901 MODULE_DEPEND(i915
, iicbus
, 1, 1, 1);
1902 MODULE_DEPEND(i915
, iic
, 1, 1, 1);
1903 MODULE_DEPEND(i915
, iicbb
, 1, 1, 1);
1905 MODULE_DEPEND(i915
, acpi
, 1, 1, 1);