/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_drv.h"
#include "i915_vgpu.h"

#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>

#define FORCEWAKE_ACK_TIMEOUT_MS 50

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static inline void
fw_domain_reset(struct drm_i915_private *i915,
		const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	d->wake_count++;
	hrtimer_start_range_ns(&d->timer,
			       NSEC_PER_MSEC,
			       NSEC_PER_MSEC,
			       HRTIMER_MODE_REL);
}
static inline void
fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
			 const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}

static inline void
fw_domain_wait_ack(const struct drm_i915_private *i915,
		   const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct drm_i915_private *i915,
	      const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}
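/*
 * Note on the helpers above: taking a domain is a three-step handshake with
 * the hardware. We first wait for any previous ack to clear
 * (fw_domain_wait_ack_clear), then write the "set" request (fw_domain_get),
 * and finally wait for the hardware to raise the ack (fw_domain_wait_ack).
 * fw_domains_get() below sequences these steps for every requested domain.
 */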
static void
fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
		fw_domain_wait_ack_clear(i915, d);
		fw_domain_get(i915, d);
	}

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_wait_ack(i915, d);

	i915->uncore.fw_domains_active |= fw_domains;
}

static void
fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_put(i915, d);

	i915->uncore.fw_domains_active &= ~fw_domains;
}

static void
fw_domains_reset(struct drm_i915_private *i915,
		 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	unsigned int tmp;

	if (!fw_domains)
		return;

	GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);

	for_each_fw_domain_masked(d, fw_domains, i915, tmp)
		fw_domain_reset(i915, d);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}
static enum hrtimer_restart
intel_uncore_fw_release_timer(struct hrtimer *timer)
{
	struct intel_uncore_forcewake_domain *domain =
		container_of(timer, struct intel_uncore_forcewake_domain, timer);
	struct drm_i915_private *dev_priv =
		container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
	unsigned long irqflags;

	assert_rpm_device_not_suspended(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return HRTIMER_NORESTART;
}
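/*
 * Design note: automatic forcewake references taken by the register
 * accessors below are not dropped immediately. Instead, fw_domain_arm_timer()
 * holds an extra reference and arms the hrtimer above, so the domain is only
 * released roughly a millisecond later. This batches back-to-back register
 * accesses into a single forcewake get/put cycle.
 */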
static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
					 bool restore)
{
	unsigned long irqflags;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domains fw, active_domains;
	unsigned int tmp;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, tmp) {
			if (hrtimer_cancel(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer(&domain->timer);
		}

		spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

		for_each_fw_domain(domain, dev_priv, tmp) {
			if (hrtimer_active(&domain->timer))
				active_domains |= domain->mask;
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
		cond_resched();
	}

	WARN_ON(active_domains);

	fw = dev_priv->uncore.fw_domains_active;
	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };
	const u32 cap = dev_priv->edram_cap;

	return EDRAM_NUM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)] *
	       1024 * 1024;
}
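/*
 * Worked example for the formula above (illustrative values only): a
 * capability word decoding to 4 banks, a ways index of 3 (16 ways) and a
 * sets index of 2 (2 sets) yields 4 * 16 * 2 = 128, i.e. 128 MiB of eDRAM.
 */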
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
{
	if (!HAS_EDRAM(dev_priv))
		return 0;

	/* The needed capability bits for size calculation
	 * are not there with pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 128 * 1024 * 1024;

	return gen9_edram_size(dev_priv);
}
static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv) ||
	    IS_BROADWELL(dev_priv) ||
	    INTEL_GEN(dev_priv) >= 9) {
		dev_priv->edram_cap = __raw_i915_read32(dev_priv,
							HSW_EDRAM_CAP);

		/* NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
	} else {
		dev_priv->edram_cap = 0;
	}

	if (HAS_EDRAM(dev_priv))
		DRM_INFO("Found %lluMB of eDRAM\n",
			 intel_uncore_edram_size(dev_priv) / (1024 * 1024));
}
static bool
fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 dbg;

	dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
		return false;

	__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	return true;
}

static bool
vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	u32 cer;

	cer = __raw_i915_read32(dev_priv, CLAIM_ER);
	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
		return false;

	__raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);

	return true;
}

static bool
check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
		return fpga_check_for_unclaimed_mmio(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return vlv_check_for_unclaimed_mmio(dev_priv);

	return false;
}
static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
					  bool restore_forcewake)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);

	/* clear out unclaimed reg detection bit */
	if (check_for_unclaimed_mmio(dev_priv))
		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev_priv)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
		info->has_decoupled_mmio = false;

	intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}

void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	intel_uncore_forcewake_reset(dev_priv, false);
}

void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
{
	__intel_uncore_early_sanitize(dev_priv, true);
	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);
	i915_check_and_clear_faults(dev_priv);
}

void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_sanitize_gt_powersave(dev_priv);
}
static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
		if (domain->wake_count++)
			fw_domains &= ~domain->mask;

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}
/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence. And subsequently the reference should be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all the
 * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
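/*
 * Usage sketch (illustrative, not part of this file): a caller that must
 * issue several raw accesses without the render well powering down in
 * between would bracket them with a get/put pair:
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_RENDER);
 *	... multiple I915_READ_FW()/I915_WRITE_FW() accesses ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_RENDER);
 */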
/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		fw_domain_arm_timer(domain);
	}
}
/**
 * intel_uncore_forcewake_put - release a forcewake domain reference
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * This function drops the device-level forcewakes for specified
 * domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	lockdep_assert_held(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.fw_domains_active);
}
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)

#define __gen6_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})

static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
{
	if (offset < entry->start)
		return -1;
	else if (offset > entry->end)
		return 1;
	else
		return 0;
}

/* Copied and "macroized" from lib/bsearch.c */
#define BSEARCH(key, base, num, cmp) ({                                 \
	unsigned int start__ = 0, end__ = (num);                        \
	typeof(base) result__ = NULL;                                   \
	while (start__ < end__) {                                       \
		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
		int ret__ = (cmp)((key), (base) + mid__);               \
		if (ret__ < 0) {                                        \
			end__ = mid__;                                  \
		} else if (ret__ > 0) {                                 \
			start__ = mid__ + 1;                            \
		} else {                                                \
			result__ = (base) + mid__;                      \
			break;                                          \
		}                                                       \
	}                                                               \
	result__;                                                       \
})
static enum forcewake_domains
find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
{
	const struct intel_forcewake_range *entry;

	entry = BSEARCH(offset,
			dev_priv->uncore.fw_domains_table,
			dev_priv->uncore.fw_domains_table_entries,
			fw_range_cmp);

	if (!entry)
		return 0;

	WARN(entry->domains & ~dev_priv->uncore.fw_domains,
	     "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
	     entry->domains & ~dev_priv->uncore.fw_domains, offset);

	return entry->domains;
}

#define GEN_FW_RANGE(s, e, d) \
	{ .start = (s), .end = (e), .domains = (d) }

#define HAS_FWTABLE(dev_priv) \
	(IS_GEN9(dev_priv) || \
	 IS_CHERRYVIEW(dev_priv) || \
	 IS_VALLEYVIEW(dev_priv))
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __vlv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_read_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset))) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
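/*
 * Example lookup against __vlv_fw_ranges above: a read of offset 0x12000
 * binary-searches the table, lands in the 0x12000-0x13fff entry and thus
 * requires FORCEWAKE_MEDIA, while an offset such as 0x4000 falls between
 * ranges and needs no forcewake at all.
 */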
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
static const i915_reg_t gen8_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),	/* 0x2000 (base) */
	GEN6_RPNSWREQ,			/* 0xA008 */
	GEN6_RC_VIDEO_FREQ,		/* 0xA00C */
	RING_TAIL(GEN6_BSD_RING_BASE),	/* 0x12000 (base) */
	RING_TAIL(VEBOX_RING_BASE),	/* 0x1a000 (base) */
	RING_TAIL(BLT_RING_BASE),	/* 0x22000 (base) */
	/* TODO: Other registers are not yet used */
};

static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
	u32 offset = i915_mmio_reg_offset(*reg);

	if (key < offset)
		return -1;
	else if (key > offset)
		return 1;
	else
		return 0;
}

static bool is_gen8_shadowed(u32 offset)
{
	const i915_reg_t *regs = gen8_shadowed_regs;

	return BSEARCH(offset, regs, ARRAY_SIZE(gen8_shadowed_regs),
		       mmio_reg_cmp);
}

#define __gen8_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd; \
	if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
		__fwd = FORCEWAKE_RENDER; \
	else \
		__fwd = 0; \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};

#define __fwtable_reg_write_fw_domains(offset) \
({ \
	enum forcewake_domains __fwd = 0; \
	if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
		__fwd = find_fw_domain(dev_priv, offset); \
	__fwd; \
})
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __gen9_fw_ranges[] = {
	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_BLITTER),
	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		      const i915_reg_t reg,
		      const bool read,
		      const bool before)
{
	if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
		 "Unclaimed %s register 0x%x\n",
		 read ? "read from" : "write to",
		 i915_mmio_reg_offset(reg)))
		i915.mmio_debug--; /* Only report the first N failures */
}

static inline void
unclaimed_reg_debug(struct drm_i915_private *dev_priv,
		    const i915_reg_t reg,
		    const bool read,
		    const bool before)
{
	if (likely(!i915.mmio_debug))
		return;

	__unclaimed_reg_debug(dev_priv, reg, read, before);
}
static const enum decoupled_power_domain fw2dpd_domain[] = {
	GEN9_DECOUPLED_PD_RENDER,
	GEN9_DECOUPLED_PD_BLITTER,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_MEDIA,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL,
	GEN9_DECOUPLED_PD_ALL
};

/*
 * Decoupled MMIO access for only 1 DWORD
 */
static void __gen9_decoupled_mmio_access(struct drm_i915_private *dev_priv,
					 u32 reg,
					 enum forcewake_domains fw_domain,
					 enum decoupled_ops operation)
{
	enum decoupled_power_domain dp_domain;
	u32 ctrl_reg_data = 0;

	dp_domain = fw2dpd_domain[fw_domain - 1];

	ctrl_reg_data |= reg;
	ctrl_reg_data |= (operation << GEN9_DECOUPLED_OP_SHIFT);
	ctrl_reg_data |= (dp_domain << GEN9_DECOUPLED_PD_SHIFT);
	ctrl_reg_data |= GEN9_DECOUPLED_DW1_GO;
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW1, ctrl_reg_data);

	if (wait_for_atomic((__raw_i915_read32(dev_priv,
			     GEN9_DECOUPLED_REG0_DW1) &
			     GEN9_DECOUPLED_DW1_GO) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Decoupled MMIO wait timed out\n");
}

static inline u32
__gen9_decoupled_mmio_read32(struct drm_i915_private *dev_priv,
			     u32 reg,
			     enum forcewake_domains fw_domain)
{
	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_READ);

	return __raw_i915_read32(dev_priv, GEN9_DECOUPLED_REG0_DW0);
}

static inline void
__gen9_decoupled_mmio_write(struct drm_i915_private *dev_priv,
			    u32 reg, u32 data,
			    enum forcewake_domains fw_domain)
{
	__raw_i915_write32(dev_priv, GEN9_DECOUPLED_REG0_DW0, data);

	__gen9_decoupled_mmio_access(dev_priv, reg, fw_domain,
				     GEN9_DECOUPLED_OP_WRITE);
}
#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read
#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	u##x val = 0; \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, true, true)

#define GEN6_READ_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, true, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val
static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	unsigned int tmp;

	GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
		fw_domain_arm_timer(domain);

	dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	if (WARN_ON(!fw_domains))
		return;

	/* Turn on all requested but inactive supported forcewake domains. */
	fw_domains &= dev_priv->uncore.fw_domains;
	fw_domains &= ~dev_priv->uncore.fw_domains_active;

	if (fw_domains)
		___force_wake_auto(dev_priv, fw_domains);
}
#define __gen_read(func, x) \
static u##x \
func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __##func##_reg_read_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
#define __fwtable_read(x) __gen_read(fwtable, x)

#define __gen9_decoupled_read(x) \
static u##x \
gen9_decoupled_read##x(struct drm_i915_private *dev_priv, \
		       i915_reg_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	fw_engine = __fwtable_reg_read_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) { \
		unsigned i; \
		u32 *ptr_data = (u32 *) &val; \
		for (i = 0; i < x/32; i++, offset += sizeof(u32), ptr_data++) \
			*ptr_data = __gen9_decoupled_mmio_read32(dev_priv, \
								 offset, \
								 fw_engine); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	GEN6_READ_FOOTER; \
}

__gen9_decoupled_read(32)
__gen9_decoupled_read(64)
__fwtable_read(8)
__fwtable_read(16)
__fwtable_read(32)
__fwtable_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __fwtable_read
#undef __gen6_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)

#undef __gen5_write
#undef __gen2_write
#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	u32 offset = i915_mmio_reg_offset(reg); \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_rpm_wakelock_held(dev_priv); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	unclaimed_reg_debug(dev_priv, reg, false, true)

#define GEN6_WRITE_FOOTER \
	unclaimed_reg_debug(dev_priv, reg, false, false); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE(offset)) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}
#define __gen_write(func, x) \
static void \
func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __##func##_reg_write_fw_domains(offset); \
	if (fw_engine) \
		__force_wake_auto(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
#define __fwtable_write(x) __gen_write(fwtable, x)

#define __gen9_decoupled_write(x) \
static void \
gen9_decoupled_write##x(struct drm_i915_private *dev_priv, \
			i915_reg_t reg, u##x val, \
			bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	fw_engine = __fwtable_reg_write_fw_domains(offset); \
	if (fw_engine & ~dev_priv->uncore.fw_domains_active) \
		__gen9_decoupled_mmio_write(dev_priv, \
					    offset, \
					    val, \
					    fw_engine); \
	else \
		__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_decoupled_write(32)
__fwtable_write(8)
__fwtable_write(16)
__fwtable_write(32)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)

#undef __fwtable_write
#undef __gen8_write
#undef __gen6_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_writeb = x##_write8; \
	(i915)->uncore.funcs.mmio_writew = x##_write16; \
	(i915)->uncore.funcs.mmio_writel = x##_write32; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
do { \
	(i915)->uncore.funcs.mmio_readb = x##_read8; \
	(i915)->uncore.funcs.mmio_readw = x##_read16; \
	(i915)->uncore.funcs.mmio_readl = x##_read32; \
	(i915)->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   i915_reg_t reg_set,
			   i915_reg_t reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	WARN_ON(!i915_mmio_reg_valid(reg_set));
	WARN_ON(!i915_mmio_reg_valid(reg_ack));

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	d->id = domain_id;

	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
	BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));

	d->mask = BIT(domain_id);

	hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	d->timer.function = intel_uncore_fw_release_timer;

	dev_priv->uncore.fw_domains |= BIT(domain_id);

	fw_domain_reset(dev_priv, d);
}
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
		return;

	if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.fw_reset = 0;
		dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
		dev_priv->uncore.fw_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
		dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_GEN9(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		if (IS_HASWELL(dev_priv))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev_priv)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. In this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		spin_lock_irq(&dev_priv->uncore.lock);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_RENDER);
		spin_unlock_irq(&dev_priv->uncore.lock);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}
#define ASSIGN_FW_DOMAINS_TABLE(d) \
{ \
	dev_priv->uncore.fw_domains_table = \
			(struct intel_forcewake_range *)(d); \
	dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}

static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
					 unsigned long action, void *data)
{
	struct drm_i915_private *dev_priv = container_of(nb,
			struct drm_i915_private, uncore.pmic_bus_access_nb);

	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/*
		 * forcewake all now to make sure that we don't need to do a
		 * forcewake later which on systems where this notifier gets
		 * called requires the punit to access to the shared pmic i2c
		 * bus, which will be busy after this notification, leading to:
		 * "render: timed out waiting for forcewake ack request."
		 * errors.
		 */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
		break;
	}

	return NOTIFY_OK;
}
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
	i915_check_vgpu(dev_priv);

	intel_uncore_edram_detect(dev_priv);
	intel_uncore_fw_domains_init(dev_priv);
	__intel_uncore_early_sanitize(dev_priv, false);

	dev_priv->uncore.unclaimed_mmio_check = 1;
	dev_priv->uncore.pmic_bus_access_nb.notifier_call =
		i915_pmic_bus_access_notifier;

	if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
	} else if (IS_GEN5(dev_priv)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);

		if (IS_VALLEYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else if (IS_GEN8(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv)) {
			ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
			ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
		}
	} else {
		ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
		ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
		ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
		if (HAS_DECOUPLED_MMIO(dev_priv)) {
			dev_priv->uncore.funcs.mmio_readl =
				gen9_decoupled_read32;
			dev_priv->uncore.funcs.mmio_readq =
				gen9_decoupled_read64;
			dev_priv->uncore.funcs.mmio_writel =
				gen9_decoupled_write32;
		}
	}

	iosf_mbi_register_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);

	i915_check_and_clear_faults(dev_priv);
}
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
	iosf_mbi_unregister_pmic_bus_access_notifier(
		&dev_priv->uncore.pmic_bus_access_nb);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev_priv);
	intel_uncore_forcewake_reset(dev_priv, false);
}

#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
static const struct register_whitelist {
	i915_reg_t offset_ldw, offset_udw;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE),
	  .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE),
	  .size = 8, .gen_bitmask = GEN_RANGE(4, 9) },
};
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	i915_reg_t offset_ldw, offset_udw;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
		    (INTEL_INFO(dev_priv)->gen_mask & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset_ldw = entry->offset_ldw;
	offset_udw = entry->offset_udw;
	size = entry->size;
	size |= reg->offset ^ i915_mmio_reg_offset(offset_ldw);

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset_ldw, offset_udw);
		break;
	case 8:
		reg->val = I915_READ64(offset_ldw);
		break;
	case 4:
		reg->val = I915_READ(offset_ldw);
		break;
	case 2:
		reg->val = I915_READ16(offset_ldw);
		break;
	case 1:
		reg->val = I915_READ8(offset_ldw);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
static int i915_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	/* assert reset for at least 20 usec */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(pdev), 500);
}

static int g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(pdev), 500);
}
static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(pdev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_i915_private *dev_priv,
			     unsigned engine_mask)
{
	int ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = intel_wait_for_register(dev_priv,
				      ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
				      500);
	if (ret)
		return ret;

	I915_WRITE(ILK_GDSR, 0);

	return 0;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
				u32 hw_domain_mask)
{
	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);

	/* Spin waiting for the device to ack the reset requests */
	return intel_wait_for_register_fw(dev_priv,
					  GEN6_GDRST, hw_domain_mask, 0,
					  500);
}
/**
 * gen6_reset_engines - reset individual engines
 * @dev_priv: i915 device
 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
 *
 * This function will reset the individual engines that are set in engine_mask.
 * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
 *
 * Note: It is the responsibility of the caller to handle the difference between
 * asking for a full domain reset versus a reset of all available individual
 * engines.
 *
 * Returns 0 on success, nonzero on error.
 */
static int gen6_reset_engines(struct drm_i915_private *dev_priv,
			      unsigned engine_mask)
{
	struct intel_engine_cs *engine;
	const u32 hw_engine_mask[I915_NUM_ENGINES] = {
		[RCS] = GEN6_GRDOM_RENDER,
		[BCS] = GEN6_GRDOM_BLT,
		[VCS] = GEN6_GRDOM_MEDIA,
		[VCS2] = GEN8_GRDOM_MEDIA2,
		[VECS] = GEN6_GRDOM_VECS,
	};
	u32 hw_mask;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		unsigned int tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
			hw_mask |= hw_engine_mask[engine->id];
	}

	ret = gen6_hw_domain_reset(dev_priv, hw_mask);

	intel_uncore_forcewake_reset(dev_priv, true);

	return ret;
}
/**
 * __intel_wait_for_register_fw - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
 * @slow_timeout_ms: slow timeout in milliseconds
 * @out_value: optional placeholder to hold the register value
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ_FW(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
 * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
 * must not be larger than 20,000 microseconds.
 *
 * Note that this routine assumes the caller holds forcewake asserted, it is
 * not suitable for very long waits. See intel_wait_for_register() if you
 * wish to wait without holding forcewake for the duration (i.e. you expect
 * the wait to be slow).
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
				 i915_reg_t reg,
				 u32 mask,
				 u32 value,
				 unsigned int fast_timeout_us,
				 unsigned int slow_timeout_ms,
				 u32 *out_value)
{
	u32 reg_value;
#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
	int ret;

	/* Catch any overuse of this function */
	might_sleep_if(slow_timeout_ms);
	GEM_BUG_ON(fast_timeout_us > 20000);

	ret = -ETIMEDOUT;
	if (fast_timeout_us && fast_timeout_us <= 20000)
		ret = _wait_for_atomic(done, fast_timeout_us, 0);
	if (ret && slow_timeout_ms)
		ret = wait_for(done, slow_timeout_ms);

	if (out_value)
		*out_value = reg_value;

	return ret;
#undef done
}
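/*
 * Usage sketch (illustrative): a caller already holding forcewake that
 * needs to poll a status bit in atomic context might do
 *
 *	ret = __intel_wait_for_register_fw(dev_priv, GEN6_GDRST,
 *					   GEN6_GRDOM_FULL, 0,
 *					   500, 0, NULL);
 *
 * i.e. spin for up to 500 us waiting for the reset bits to self-clear,
 * with no sleeping slow path (slow_timeout_ms == 0).
 */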
/**
 * intel_wait_for_register - wait until register matches expected state
 * @dev_priv: the i915 device
 * @reg: the register to read
 * @mask: mask to apply to register value
 * @value: expected value
 * @timeout_ms: timeout in milliseconds
 *
 * This routine waits until the target register @reg contains the expected
 * @value after applying the @mask, i.e. it waits until ::
 *
 *     (I915_READ(reg) & mask) == value
 *
 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
 *
 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
 */
int intel_wait_for_register(struct drm_i915_private *dev_priv,
			    i915_reg_t reg,
			    u32 mask,
			    u32 value,
			    unsigned int timeout_ms)
{
	unsigned fw =
		intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
	int ret;

	might_sleep();

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw);

	ret = __intel_wait_for_register_fw(dev_priv,
					   reg, mask, value,
					   2, 0, NULL);

	intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irq(&dev_priv->uncore.lock);

	if (ret)
		ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
			       timeout_ms);

	return ret;
}
*engine
)
1688 struct drm_i915_private
*dev_priv
= engine
->i915
;
1691 I915_WRITE_FW(RING_RESET_CTL(engine
->mmio_base
),
1692 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET
));
1694 ret
= intel_wait_for_register_fw(dev_priv
,
1695 RING_RESET_CTL(engine
->mmio_base
),
1696 RESET_CTL_READY_TO_RESET
,
1697 RESET_CTL_READY_TO_RESET
,
1700 DRM_ERROR("%s: reset request timeout\n", engine
->name
);
1705 static void gen8_unrequest_engine_reset(struct intel_engine_cs
*engine
)
1707 struct drm_i915_private
*dev_priv
= engine
->i915
;
1709 I915_WRITE_FW(RING_RESET_CTL(engine
->mmio_base
),
1710 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET
));
1713 static int gen8_reset_engines(struct drm_i915_private
*dev_priv
,
1714 unsigned engine_mask
)
1716 struct intel_engine_cs
*engine
;
1719 for_each_engine_masked(engine
, dev_priv
, engine_mask
, tmp
)
1720 if (gen8_request_engine_reset(engine
))
1723 return gen6_reset_engines(dev_priv
, engine_mask
);
1726 for_each_engine_masked(engine
, dev_priv
, engine_mask
, tmp
)
1727 gen8_unrequest_engine_reset(engine
);
typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);

static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
{
	if (!i915.reset)
		return NULL;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return gen8_reset_engines;
	else if (INTEL_INFO(dev_priv)->gen >= 6)
		return gen6_reset_engines;
	else if (IS_GEN5(dev_priv))
		return ironlake_do_reset;
	else if (IS_G4X(dev_priv))
		return g4x_do_reset;
	else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		return g33_do_reset;
	else if (INTEL_INFO(dev_priv)->gen >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
{
	reset_func reset;
	int ret;

	reset = intel_get_gpu_reset(dev_priv);
	if (reset == NULL)
		return -ENODEV;

	/* If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	ret = reset(dev_priv, engine_mask);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
{
	return intel_get_gpu_reset(dev_priv) != NULL;
}
int intel_guc_reset(struct drm_i915_private *dev_priv)
{
	int ret;
	unsigned long irqflags;

	if (!HAS_GUC(dev_priv))
		return -EINVAL;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
	return check_for_unclaimed_mmio(dev_priv);
}

bool
intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
{
	if (unlikely(i915.mmio_debug ||
		     dev_priv->uncore.unclaimed_mmio_check <= 0))
		return false;

	if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		i915.mmio_debug++;
		dev_priv->uncore.unclaimed_mmio_check--;
		return true;
	}

	return false;
}
static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
				i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv)) {
		fw_domains = __fwtable_reg_read_fw_domains(offset);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		fw_domains = __gen6_reg_read_fw_domains(offset);
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
				 i915_reg_t reg)
{
	u32 offset = i915_mmio_reg_offset(reg);
	enum forcewake_domains fw_domains;

	if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
		fw_domains = __fwtable_reg_write_fw_domains(offset);
	} else if (IS_GEN8(dev_priv)) {
		fw_domains = __gen8_reg_write_fw_domains(offset);
	} else if (IS_GEN(dev_priv, 6, 7)) {
		fw_domains = FORCEWAKE_RENDER;
	} else {
		WARN_ON(!IS_GEN(dev_priv, 2, 5));
		fw_domains = 0;
	}

	WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);

	return fw_domains;
}

/**
 * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
 *				    a register
 * @dev_priv: pointer to struct drm_i915_private
 * @reg: register in question
 * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
 *
 * Returns a set of forcewake domains required to be taken with for example
 * intel_uncore_forcewake_get for the specified register to be accessible in the
 * specified mode (read, write or read/write) with raw mmio accessors.
 *
 * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
 * callers to do FIFO management on their own or risk losing writes.
 */
enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
			       i915_reg_t reg, unsigned int op)
{
	enum forcewake_domains fw_domains = 0;

	WARN_ON(!op);

	if (intel_vgpu_active(dev_priv))
		return 0;

	if (op & FW_REG_READ)
		fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);

	if (op & FW_REG_WRITE)
		fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);

	return fw_domains;
}
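/*
 * Usage sketch (illustrative): intel_wait_for_register() above is the
 * canonical in-file consumer of this query. A caller wanting raw access
 * without the automatic accessors would do
 *
 *	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	intel_uncore_forcewake_get(dev_priv, fw);
 *	... I915_READ_FW()/I915_WRITE_FW() on reg ...
 *	intel_uncore_forcewake_put(dev_priv, fw);
 */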
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_uncore.c"
#include "selftests/intel_uncore.c"
#endif