/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Vinit Azad <vinit.azad@intel.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Dave Gordon <david.s.gordon@intel.com>
 *    Alex Dai <yu.dai@intel.com>
 */
29 #include <linux/firmware.h>
31 #include "intel_guc.h"
/**
 * DOC: GuC-specific firmware loader
 *
 * intel_guc:
 * Top level structure of guc. It handles firmware loading and manages client
 * pool and doorbells. intel_guc owns a i915_guc_client to replace the legacy
 * ExecList submission.
 *
 * Firmware versioning:
 * The firmware build process will generate a version header file with major and
 * minor version defined. The versions are built into CSS header of firmware.
 * i915 kernel driver set the minimal firmware version required per platform.
 * The firmware installation package will install (symbolic link) proper version
 * of firmware.
 *
 * GuC address space:
 * GuC does not allow any gfx GGTT address that falls into range [0, WOPCM_TOP),
 * which is reserved for Boot ROM, SRAM and WOPCM. Currently this top address is
 * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
 * used by GuC is pinned with PIN_OFFSET_BIAS along with size of WOPCM.
 *
 * Firmware log:
 * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
 * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
 * i915_guc_load_status will print out firmware loading status and scratch
 * registers value.
 */
/* Path of the SKL GuC firmware blob, as requested via request_firmware() */
#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin"
/* Advertise the blob so userspace/initramfs tooling knows to ship it */
MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
65 /* User-friendly representation of an enum */
66 const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status
)
69 case GUC_FIRMWARE_FAIL
:
71 case GUC_FIRMWARE_NONE
:
73 case GUC_FIRMWARE_PENDING
:
75 case GUC_FIRMWARE_SUCCESS
:
82 static void direct_interrupts_to_host(struct drm_i915_private
*dev_priv
)
84 struct intel_engine_cs
*ring
;
87 /* tell all command streamers NOT to forward interrupts and vblank to GuC */
88 irqs
= _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK
, GFX_FORWARD_VBLANK_NEVER
);
89 irqs
|= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING
);
90 for_each_ring(ring
, dev_priv
, i
)
91 I915_WRITE(RING_MODE_GEN7(ring
), irqs
);
93 /* route all GT interrupts to the host */
94 I915_WRITE(GUC_BCS_RCS_IER
, 0);
95 I915_WRITE(GUC_VCS2_VCS1_IER
, 0);
96 I915_WRITE(GUC_WD_VECS_IER
, 0);
100 static void direct_interrupts_to_guc(struct drm_i915_private
*dev_priv
)
102 struct intel_engine_cs
*ring
;
105 /* tell all command streamers to forward interrupts and vblank to GuC */
106 irqs
= _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK
, GFX_FORWARD_VBLANK_ALWAYS
);
107 irqs
|= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING
);
108 for_each_ring(ring
, dev_priv
, i
)
109 I915_WRITE(RING_MODE_GEN7(ring
), irqs
);
111 /* route USER_INTERRUPT to Host, all others are sent to GuC. */
112 irqs
= GT_RENDER_USER_INTERRUPT
<< GEN8_RCS_IRQ_SHIFT
|
113 GT_RENDER_USER_INTERRUPT
<< GEN8_BCS_IRQ_SHIFT
;
114 /* These three registers have the same bit definitions */
115 I915_WRITE(GUC_BCS_RCS_IER
, ~irqs
);
116 I915_WRITE(GUC_VCS2_VCS1_IER
, ~irqs
);
117 I915_WRITE(GUC_WD_VECS_IER
, ~irqs
);
120 static u32
get_gttype(struct drm_i915_private
*dev_priv
)
122 /* XXX: GT type based on PCI device ID? field seems unused by fw */
126 static u32
get_core_family(struct drm_i915_private
*dev_priv
)
128 switch (INTEL_INFO(dev_priv
)->gen
) {
130 return GFXCORE_FAMILY_GEN9
;
133 DRM_ERROR("GUC: unsupported core family\n");
134 return GFXCORE_FAMILY_UNKNOWN
;
138 static void set_guc_init_params(struct drm_i915_private
*dev_priv
)
140 struct intel_guc
*guc
= &dev_priv
->guc
;
141 u32 params
[GUC_CTL_MAX_DWORDS
];
144 memset(¶ms
, 0, sizeof(params
));
146 params
[GUC_CTL_DEVICE_INFO
] |=
147 (get_gttype(dev_priv
) << GUC_CTL_GTTYPE_SHIFT
) |
148 (get_core_family(dev_priv
) << GUC_CTL_COREFAMILY_SHIFT
);
151 * GuC ARAT increment is 10 ns. GuC default scheduler quantum is one
152 * second. This ARAR is calculated by:
153 * Scheduler-Quantum-in-ns / ARAT-increment-in-ns = 1000000000 / 10
155 params
[GUC_CTL_ARAT_HIGH
] = 0;
156 params
[GUC_CTL_ARAT_LOW
] = 100000000;
158 params
[GUC_CTL_WA
] |= GUC_CTL_WA_UK_BY_DRIVER
;
160 params
[GUC_CTL_FEATURE
] |= GUC_CTL_DISABLE_SCHEDULER
|
161 GUC_CTL_VCS2_ENABLED
;
163 if (i915
.guc_log_level
>= 0) {
164 params
[GUC_CTL_LOG_PARAMS
] = guc
->log_flags
;
165 params
[GUC_CTL_DEBUG
] =
166 i915
.guc_log_level
<< GUC_LOG_VERBOSITY_SHIFT
;
169 /* If GuC submission is enabled, set up additional parameters here */
170 if (i915
.enable_guc_submission
) {
171 u32 pgs
= i915_gem_obj_ggtt_offset(dev_priv
->guc
.ctx_pool_obj
);
172 u32 ctx_in_16
= GUC_MAX_GPU_CONTEXTS
/ 16;
175 params
[GUC_CTL_CTXINFO
] = (pgs
<< GUC_CTL_BASE_ADDR_SHIFT
) |
176 (ctx_in_16
<< GUC_CTL_CTXNUM_IN16_SHIFT
);
178 params
[GUC_CTL_FEATURE
] |= GUC_CTL_KERNEL_SUBMISSIONS
;
180 /* Unmask this bit to enable the GuC's internal scheduler */
181 params
[GUC_CTL_FEATURE
] &= ~GUC_CTL_DISABLE_SCHEDULER
;
184 I915_WRITE(SOFT_SCRATCH(0), 0);
186 for (i
= 0; i
< GUC_CTL_MAX_DWORDS
; i
++)
187 I915_WRITE(SOFT_SCRATCH(1 + i
), params
[i
]);
192 * Read the GuC status register (GUC_STATUS) and store it in the
193 * specified location; then return a boolean indicating whether
194 * the value matches either of two values representing completion
195 * of the GuC boot process.
197 * This is used for polling the GuC status in a wait_for_atomic()
200 static inline bool guc_ucode_response(struct drm_i915_private
*dev_priv
,
203 u32 val
= I915_READ(GUC_STATUS
);
204 u32 uk_val
= val
& GS_UKERNEL_MASK
;
206 return (uk_val
== GS_UKERNEL_READY
||
207 ((val
& GS_MIA_CORE_STATE
) && uk_val
== GS_UKERNEL_LAPIC_DONE
));
/*
 * Transfer the firmware image to RAM for execution by the microcontroller.
 *
 * GuC Firmware layout:
 * +-------------------------------+  ----
 * |          CSS header           |  128B
 * | contains major/minor version  |
 * +-------------------------------+  ----
 * |             uCode             |
 * +-------------------------------+  ----
 * |         RSA signature         |  256B
 * +-------------------------------+  ----
 *
 * Architecturally, the DMA engine is bidirectional, and can potentially even
 * transfer between GTT locations. This functionality is left out of the API
 * for now as there is no need for it.
 *
 * Note that GuC needs the CSS header plus uKernel code to be copied by the
 * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
 */
/* Byte offsets/sizes within the firmware blob (see layout diagram above) */
#define UOS_CSS_HEADER_OFFSET		0
#define UOS_VER_MINOR_OFFSET		0x44	/* u16 minor version in CSS header */
#define UOS_VER_MAJOR_OFFSET		0x46	/* u16 major version in CSS header */
#define UOS_CSS_HEADER_SIZE		0x80	/* 128B CSS header */
#define UOS_RSA_SIG_SIZE		0x100	/* 256B RSA signature at end of blob */
238 static int guc_ucode_xfer_dma(struct drm_i915_private
*dev_priv
)
240 struct intel_guc_fw
*guc_fw
= &dev_priv
->guc
.guc_fw
;
241 struct drm_i915_gem_object
*fw_obj
= guc_fw
->guc_fw_obj
;
242 unsigned long offset
;
243 struct sg_table
*sg
= fw_obj
->pages
;
244 u32 status
, ucode_size
, rsa
[UOS_RSA_SIG_SIZE
/ sizeof(u32
)];
247 /* uCode size, also is where RSA signature starts */
248 offset
= ucode_size
= guc_fw
->guc_fw_size
- UOS_RSA_SIG_SIZE
;
249 I915_WRITE(DMA_COPY_SIZE
, ucode_size
);
251 /* Copy RSA signature from the fw image to HW for verification */
252 sg_pcopy_to_buffer(sg
->sgl
, sg
->nents
, rsa
, UOS_RSA_SIG_SIZE
, offset
);
253 for (i
= 0; i
< UOS_RSA_SIG_SIZE
/ sizeof(u32
); i
++)
254 I915_WRITE(UOS_RSA_SCRATCH(i
), rsa
[i
]);
256 /* Set the source address for the new blob */
257 offset
= i915_gem_obj_ggtt_offset(fw_obj
);
258 I915_WRITE(DMA_ADDR_0_LOW
, lower_32_bits(offset
));
259 I915_WRITE(DMA_ADDR_0_HIGH
, upper_32_bits(offset
) & 0xFFFF);
262 * Set the DMA destination. Current uCode expects the code to be
263 * loaded at 8k; locations below this are used for the stack.
265 I915_WRITE(DMA_ADDR_1_LOW
, 0x2000);
266 I915_WRITE(DMA_ADDR_1_HIGH
, DMA_ADDRESS_SPACE_WOPCM
);
268 /* Finally start the DMA */
269 I915_WRITE(DMA_CTRL
, _MASKED_BIT_ENABLE(UOS_MOVE
| START_DMA
));
272 * Spin-wait for the DMA to complete & the GuC to start up.
273 * NB: Docs recommend not using the interrupt for completion.
274 * Measurements indicate this should take no more than 20ms, so a
275 * timeout here indicates that the GuC has failed and is unusable.
276 * (Higher levels of the driver will attempt to fall back to
277 * execlist mode if this happens.)
279 ret
= wait_for_atomic(guc_ucode_response(dev_priv
, &status
), 100);
281 DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
282 I915_READ(DMA_CTRL
), status
);
284 if ((status
& GS_BOOTROM_MASK
) == GS_BOOTROM_RSA_FAILED
) {
285 DRM_ERROR("GuC firmware signature verification failed\n");
289 DRM_DEBUG_DRIVER("returning %d\n", ret
);
295 * Load the GuC firmware blob into the MinuteIA.
297 static int guc_ucode_xfer(struct drm_i915_private
*dev_priv
)
299 struct intel_guc_fw
*guc_fw
= &dev_priv
->guc
.guc_fw
;
300 struct drm_device
*dev
= dev_priv
->dev
;
303 ret
= i915_gem_object_set_to_gtt_domain(guc_fw
->guc_fw_obj
, false);
305 DRM_DEBUG_DRIVER("set-domain failed %d\n", ret
);
309 ret
= i915_gem_obj_ggtt_pin(guc_fw
->guc_fw_obj
, 0, 0);
311 DRM_DEBUG_DRIVER("pin failed %d\n", ret
);
315 /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
316 I915_WRITE(GEN8_GTCR
, GEN8_GTCR_INVALIDATE
);
318 intel_uncore_forcewake_get(dev_priv
, FORCEWAKE_ALL
);
321 I915_WRITE(GUC_WOPCM_SIZE
, GUC_WOPCM_SIZE_VALUE
);
322 I915_WRITE(DMA_GUC_WOPCM_OFFSET
, GUC_WOPCM_OFFSET_VALUE
);
324 /* Enable MIA caching. GuC clock gating is disabled. */
325 I915_WRITE(GUC_SHIM_CONTROL
, GUC_SHIM_CONTROL_VALUE
);
327 /* WaDisableMinuteIaClockGating:skl,bxt */
328 if ((IS_SKYLAKE(dev
) && INTEL_REVID(dev
) <= SKL_REVID_B0
) ||
329 (IS_BROXTON(dev
) && INTEL_REVID(dev
) == BXT_REVID_A0
)) {
330 I915_WRITE(GUC_SHIM_CONTROL
, (I915_READ(GUC_SHIM_CONTROL
) &
331 ~GUC_ENABLE_MIA_CLOCK_GATING
));
334 /* WaC6DisallowByGfxPause*/
335 I915_WRITE(GEN6_GFXPAUSE
, 0x30FFF);
338 I915_WRITE(GEN9LP_GT_PM_CONFIG
, GT_DOORBELL_ENABLE
);
340 I915_WRITE(GEN9_GT_PM_CONFIG
, GT_DOORBELL_ENABLE
);
343 /* DOP Clock Gating Enable for GuC clocks */
344 I915_WRITE(GEN7_MISCCPCTL
, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE
|
345 I915_READ(GEN7_MISCCPCTL
)));
347 /* allows for 5us before GT can go to RC6 */
348 I915_WRITE(GUC_ARAT_C6DIS
, 0x1FF);
351 set_guc_init_params(dev_priv
);
353 ret
= guc_ucode_xfer_dma(dev_priv
);
355 intel_uncore_forcewake_put(dev_priv
, FORCEWAKE_ALL
);
358 * We keep the object pages for reuse during resume. But we can unpin it
359 * now that DMA has completed, so it doesn't continue to take up space.
361 i915_gem_object_ggtt_unpin(guc_fw
->guc_fw_obj
);
367 * intel_guc_ucode_load() - load GuC uCode into the device
370 * Called from gem_init_hw() during driver loading and also after a GPU reset.
372 * The firmware image should have already been fetched into memory by the
373 * earlier call to intel_guc_ucode_init(), so here we need only check that
374 * is succeeded, and then transfer the image to the h/w.
376 * Return: non-zero code on error
378 int intel_guc_ucode_load(struct drm_device
*dev
)
380 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
381 struct intel_guc_fw
*guc_fw
= &dev_priv
->guc
.guc_fw
;
384 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
385 intel_guc_fw_status_repr(guc_fw
->guc_fw_fetch_status
),
386 intel_guc_fw_status_repr(guc_fw
->guc_fw_load_status
));
388 direct_interrupts_to_host(dev_priv
);
390 if (guc_fw
->guc_fw_fetch_status
== GUC_FIRMWARE_NONE
)
393 if (guc_fw
->guc_fw_fetch_status
== GUC_FIRMWARE_SUCCESS
&&
394 guc_fw
->guc_fw_load_status
== GUC_FIRMWARE_FAIL
)
397 guc_fw
->guc_fw_load_status
= GUC_FIRMWARE_PENDING
;
399 DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
400 intel_guc_fw_status_repr(guc_fw
->guc_fw_fetch_status
));
402 switch (guc_fw
->guc_fw_fetch_status
) {
403 case GUC_FIRMWARE_FAIL
:
404 /* something went wrong :( */
408 case GUC_FIRMWARE_NONE
:
409 case GUC_FIRMWARE_PENDING
:
412 WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
414 intel_guc_fw_status_repr(guc_fw
->guc_fw_fetch_status
),
415 guc_fw
->guc_fw_fetch_status
);
419 case GUC_FIRMWARE_SUCCESS
:
423 err
= i915_guc_submission_init(dev
);
427 err
= guc_ucode_xfer(dev_priv
);
431 guc_fw
->guc_fw_load_status
= GUC_FIRMWARE_SUCCESS
;
433 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
434 intel_guc_fw_status_repr(guc_fw
->guc_fw_fetch_status
),
435 intel_guc_fw_status_repr(guc_fw
->guc_fw_load_status
));
437 if (i915
.enable_guc_submission
) {
438 /* The execbuf_client will be recreated. Release it first. */
439 i915_guc_submission_disable(dev
);
441 err
= i915_guc_submission_enable(dev
);
444 direct_interrupts_to_guc(dev_priv
);
450 if (guc_fw
->guc_fw_load_status
== GUC_FIRMWARE_PENDING
)
451 guc_fw
->guc_fw_load_status
= GUC_FIRMWARE_FAIL
;
453 direct_interrupts_to_host(dev_priv
);
454 i915_guc_submission_disable(dev
);
459 static void guc_fw_fetch(struct drm_device
*dev
, struct intel_guc_fw
*guc_fw
)
461 struct drm_i915_gem_object
*obj
;
462 const struct firmware
*fw
;
463 const u8
*css_header
;
464 const size_t minsize
= UOS_CSS_HEADER_SIZE
+ UOS_RSA_SIG_SIZE
;
465 const size_t maxsize
= GUC_WOPCM_SIZE_VALUE
+ UOS_RSA_SIG_SIZE
466 - 0x8000; /* 32k reserved (8K stack + 24k context) */
469 DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
470 intel_guc_fw_status_repr(guc_fw
->guc_fw_fetch_status
));
472 err
= request_firmware(&fw
, guc_fw
->guc_fw_path
, &dev
->pdev
->dev
);
478 DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
479 guc_fw
->guc_fw_path
, fw
);
480 DRM_DEBUG_DRIVER("firmware file size %zu (minimum %zu, maximum %zu)\n",
481 fw
->size
, minsize
, maxsize
);
483 /* Check the size of the blob befoe examining buffer contents */
484 if (fw
->size
< minsize
|| fw
->size
> maxsize
)
488 * The GuC firmware image has the version number embedded at a well-known
489 * offset within the firmware blob; note that major / minor version are
490 * TWO bytes each (i.e. u16), although all pointers and offsets are defined
491 * in terms of bytes (u8).
493 css_header
= fw
->data
+ UOS_CSS_HEADER_OFFSET
;
494 guc_fw
->guc_fw_major_found
= *(u16
*)(css_header
+ UOS_VER_MAJOR_OFFSET
);
495 guc_fw
->guc_fw_minor_found
= *(u16
*)(css_header
+ UOS_VER_MINOR_OFFSET
);
497 if (guc_fw
->guc_fw_major_found
!= guc_fw
->guc_fw_major_wanted
||
498 guc_fw
->guc_fw_minor_found
< guc_fw
->guc_fw_minor_wanted
) {
499 DRM_ERROR("GuC firmware version %d.%d, required %d.%d\n",
500 guc_fw
->guc_fw_major_found
, guc_fw
->guc_fw_minor_found
,
501 guc_fw
->guc_fw_major_wanted
, guc_fw
->guc_fw_minor_wanted
);
506 DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
507 guc_fw
->guc_fw_major_found
, guc_fw
->guc_fw_minor_found
,
508 guc_fw
->guc_fw_major_wanted
, guc_fw
->guc_fw_minor_wanted
);
510 mutex_lock(&dev
->struct_mutex
);
511 obj
= i915_gem_object_create_from_data(dev
, fw
->data
, fw
->size
);
512 mutex_unlock(&dev
->struct_mutex
);
513 if (IS_ERR_OR_NULL(obj
)) {
514 err
= obj
? PTR_ERR(obj
) : -ENOMEM
;
518 guc_fw
->guc_fw_obj
= obj
;
519 guc_fw
->guc_fw_size
= fw
->size
;
521 DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
524 release_firmware(fw
);
525 guc_fw
->guc_fw_fetch_status
= GUC_FIRMWARE_SUCCESS
;
529 DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
530 err
, fw
, guc_fw
->guc_fw_obj
);
531 DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
532 guc_fw
->guc_fw_path
, err
);
534 obj
= guc_fw
->guc_fw_obj
;
536 drm_gem_object_unreference(&obj
->base
);
537 guc_fw
->guc_fw_obj
= NULL
;
539 release_firmware(fw
); /* OK even if fw is NULL */
540 guc_fw
->guc_fw_fetch_status
= GUC_FIRMWARE_FAIL
;
544 * intel_guc_ucode_init() - define parameters and fetch firmware
547 * Called early during driver load, but after GEM is initialised.
549 * The firmware will be transferred to the GuC's memory later,
550 * when intel_guc_ucode_load() is called.
552 void intel_guc_ucode_init(struct drm_device
*dev
)
554 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
555 struct intel_guc_fw
*guc_fw
= &dev_priv
->guc
.guc_fw
;
558 if (!HAS_GUC_SCHED(dev
))
559 i915
.enable_guc_submission
= false;
561 if (!HAS_GUC_UCODE(dev
)) {
563 } else if (IS_SKYLAKE(dev
)) {
564 fw_path
= I915_SKL_GUC_UCODE
;
565 guc_fw
->guc_fw_major_wanted
= 4;
566 guc_fw
->guc_fw_minor_wanted
= 3;
568 i915
.enable_guc_submission
= false;
569 fw_path
= ""; /* unknown device */
572 guc_fw
->guc_dev
= dev
;
573 guc_fw
->guc_fw_path
= fw_path
;
574 guc_fw
->guc_fw_fetch_status
= GUC_FIRMWARE_NONE
;
575 guc_fw
->guc_fw_load_status
= GUC_FIRMWARE_NONE
;
580 if (*fw_path
== '\0') {
581 DRM_ERROR("No GuC firmware known for this platform\n");
582 guc_fw
->guc_fw_fetch_status
= GUC_FIRMWARE_FAIL
;
586 guc_fw
->guc_fw_fetch_status
= GUC_FIRMWARE_PENDING
;
587 DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path
);
588 guc_fw_fetch(dev
, guc_fw
);
589 /* status must now be FAIL or SUCCESS */
594 * intel_guc_ucode_fini() - clean up all allocated resources
597 void intel_guc_ucode_fini(struct drm_device
*dev
)
599 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
600 struct intel_guc_fw
*guc_fw
= &dev_priv
->guc
.guc_fw
;
602 direct_interrupts_to_host(dev_priv
);
603 i915_guc_submission_fini(dev
);
605 mutex_lock(&dev
->struct_mutex
);
606 if (guc_fw
->guc_fw_obj
)
607 drm_gem_object_unreference(&guc_fw
->guc_fw_obj
->base
);
608 guc_fw
->guc_fw_obj
= NULL
;
609 mutex_unlock(&dev
->struct_mutex
);
611 guc_fw
->guc_fw_fetch_status
= GUC_FIRMWARE_NONE
;