/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/sysrq.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)
/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX			\
	(I915_ASLE_INTERRUPT |				\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)
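/*
 * How these masks are combined later in i915_driver_irq_postinstall(): the
 * IER enable value is I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR,
 * while IMR starts out as ~I915_INTERRUPT_ENABLE_FIX.  Only the "VAR" bits
 * are therefore masked and unmasked at runtime via i915_enable_irq() and
 * i915_disable_irq() below.
 */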
static inline void
ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
		dev_priv->gt_irq_mask_reg &= ~mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}

static inline void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
		dev_priv->gt_irq_mask_reg |= mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
		(void) I915_READ(GTIMR);
	}
}
/* For display hotplug interrupt */
static inline void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
		(void) I915_READ(DEIMR);
	}
}
static inline void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}
static inline u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);
	}
}
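/*
 * Note on the "mask >> 16" above: in PIPESTAT the enable bits occupy the
 * high half of the register and the matching status bits sit 16 bits lower,
 * so writing the enable mask shifted right by 16 also acks any status that
 * was already pending for the events being enabled.
 */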
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		(void) I915_READ(reg);
	}
}
/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle (struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}
}
/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}
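/*
 * Composition example for the return above: the frame counter is split
 * across two registers, with its low 8 bits packed into the pixel-count
 * register.  If the high field reads back 0x3 and the low field 0xa5, the
 * reported vblank count is (0x3 << 8) | 0xa5 = 0x3a5.
 */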
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %d\n", pipe);
		return 0;
	}

	return I915_READ(reg);
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
static void i915_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay = dev_priv->cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->cur_delay != dev_priv->max_delay)
			new_delay = dev_priv->cur_delay - 1;
		if (new_delay < dev_priv->max_delay)
			new_delay = dev_priv->max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->cur_delay != dev_priv->min_delay)
			new_delay = dev_priv->cur_delay + 1;
		if (new_delay > dev_priv->min_delay)
			new_delay = dev_priv->min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->cur_delay = new_delay;

	return;
}
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = ring->get_seqno(ring);
	ring->irq_seqno = seqno;
	trace_i915_gem_request_complete(dev, seqno);
	wake_up_all(&ring->irq_queue);
	dev_priv->hangcheck_count = 0;
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
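/*
 * Each completed-seqno interrupt also resets the hangcheck state above: the
 * count goes back to zero and the timer is pushed out another
 * DRM_I915_HANGCHECK_PERIOD, so i915_hangcheck_elapsed() only sees unchanged
 * state when no ring has reported progress for a full period.
 */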
static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pch_iir;
	u32 hotplug_mask;
	struct drm_i915_master_private *master_priv;
	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;

	if (IS_GEN6(dev))
		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	(void)I915_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
		goto done;

	if (HAS_PCH_CPT(dev))
		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
	else
		hotplug_mask = SDE_HOTPLUG_MASK;

	ret = IRQ_HANDLED;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}

	if (gt_iir & GT_PIPE_NOTIFY)
		notify_ring(dev, &dev_priv->render_ring);
	if (gt_iir & bsd_usr_interrupt)
		notify_ring(dev, &dev_priv->bsd_ring);
	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->blt_ring);

	if (de_iir & DE_GSE)
		intel_opregion_gse_intr(dev);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (de_iir & DE_PCU_EVENT) {
		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
		i915_handle_rps_change(dev);
	}

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);

done:
	I915_WRITE(DEIER, de_ier);
	(void)I915_READ(DEIER);

	return ret;
}
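/*
 * Ordering note for the handler above: DEIER's master bit is dropped while
 * the IIR registers are read and acked so no new cascaded interrupt can race
 * with the clears, the PCH bits (SDEIIR) are cleared before DEIIR because the
 * PCH event is itself reported through a DE bit, and DEIER is restored at
 * "done" to re-enable the master interrupt control.
 */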
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * occurred.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { "ERROR=1", NULL };
	char *reset_event[] = { "RESET=1", NULL };
	char *reset_done_event[] = { "ERROR=0", NULL };

	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
		}
		complete_all(&dev_priv->error_completion);
	}
}
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
			 struct drm_gem_object *src)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_error_object *dst;
	struct drm_i915_gem_object *src_priv;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL)
		return NULL;

	src_priv = to_intel_bo(src);
	if (src_priv->pages == NULL)
		return NULL;

	page_count = src->size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
	if (dst == NULL)
		return NULL;

	reloc_offset = src_priv->gtt_offset;
	for (page = 0; page < page_count; page++) {
		unsigned long flags;
		void __iomem *s;
		void *d;

		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
		if (d == NULL)
			goto unwind;

		local_irq_save(flags);
		s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
					     reloc_offset);
		memcpy_fromio(d, s, PAGE_SIZE);
		io_mapping_unmap_atomic(s);
		local_irq_restore(flags);

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src_priv->gtt_offset;

	return dst;

unwind:
	while (page--)
		kfree(dst->pages[page]);
	kfree(dst);
	return NULL;
}
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		kfree(obj->pages[page]);

	kfree(obj);
}

static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	i915_error_object_free(error->batchbuffer[0]);
	i915_error_object_free(error->batchbuffer[1]);
	i915_error_object_free(error->ringbuffer);
	kfree(error->active_bo);
	kfree(error->overlay);
	kfree(error);
}
static u32
i915_get_bbaddr(struct drm_device *dev, u32 *ring)
{
	u32 cmd;

	if (IS_I830(dev) || IS_845G(dev))
		cmd = MI_BATCH_BUFFER;
	else if (INTEL_INFO(dev)->gen >= 4)
		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
		       MI_BATCH_NON_SECURE_I965);
	else
		cmd = (MI_BATCH_BUFFER_START | (2 << 6));

	return ring[0] == cmd ? ring[1] : 0;
}
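/*
 * i915_get_bbaddr() relies on the driver emitting batchbuffer starts in one
 * fixed encoding per generation: if the dword at ring[0] matches that exact
 * MI_BATCH_BUFFER / MI_BATCH_BUFFER_START form, ring[1] holds the batch's
 * graphics address; otherwise 0 is returned and the caller keeps scanning
 * backwards through the ring.
 */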
static u32
i915_ringbuffer_last_batch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 head, bbaddr;
	u32 *ring;

	/* Locate the current position in the ringbuffer and walk back
	 * to find the most recently dispatched batch buffer.
	 */
	bbaddr = 0;
	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring = (u32 *)(dev_priv->render_ring.virtual_start + head);

	while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
		bbaddr = i915_get_bbaddr(dev, ring);
		if (bbaddr)
			break;
	}

	if (bbaddr == 0) {
		ring = (u32 *)(dev_priv->render_ring.virtual_start
				+ dev_priv->render_ring.size);
		while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
			bbaddr = i915_get_bbaddr(dev, ring);
			if (bbaddr)
				break;
		}
	}

	return bbaddr;
}
/**
 * i915_capture_error_state - capture an error record for later analysis
 * @dev: drm device
 *
 * Should be called when an error is detected (either a hang or an error
 * interrupt) to capture error state from the time of the error.  Fills
 * out a structure which becomes available in debugfs for user level tools
 * to pick up.
 */
static void i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_i915_error_state *error;
	struct drm_gem_object *batchbuffer[2];
	unsigned long flags;
	u32 bbaddr;
	int count;

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	error = dev_priv->first_error;
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
	if (error)
		return;

	error = kmalloc(sizeof(*error), GFP_ATOMIC);
	if (!error) {
		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
		return;
	}

	DRM_DEBUG_DRIVER("generating error event\n");

	error->seqno =
		dev_priv->render_ring.get_seqno(&dev_priv->render_ring);
	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	error->pipeastat = I915_READ(PIPEASTAT);
	error->pipebstat = I915_READ(PIPEBSTAT);
	error->instpm = I915_READ(INSTPM);
	error->error = 0;
	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->bcs_acthd = I915_READ(BCS_ACTHD);
		error->bcs_ipehr = I915_READ(BCS_IPEHR);
		error->bcs_ipeir = I915_READ(BCS_IPEIR);
		error->bcs_instdone = I915_READ(BCS_INSTDONE);
		error->bcs_seqno = 0;
		if (dev_priv->blt_ring.get_seqno)
			error->bcs_seqno = dev_priv->blt_ring.get_seqno(&dev_priv->blt_ring);
	}
	if (INTEL_INFO(dev)->gen >= 4) {
		error->ipeir = I915_READ(IPEIR_I965);
		error->ipehr = I915_READ(IPEHR_I965);
		error->instdone = I915_READ(INSTDONE_I965);
		error->instps = I915_READ(INSTPS);
		error->instdone1 = I915_READ(INSTDONE1);
		error->acthd = I915_READ(ACTHD_I965);
		error->bbaddr = I915_READ64(BB_ADDR);
	} else {
		error->ipeir = I915_READ(IPEIR);
		error->ipehr = I915_READ(IPEHR);
		error->instdone = I915_READ(INSTDONE);
		error->acthd = I915_READ(ACTHD);
	}

	bbaddr = i915_ringbuffer_last_batch(dev);

	/* Grab the current batchbuffer, most likely to have crashed. */
	batchbuffer[0] = NULL;
	batchbuffer[1] = NULL;
	count = 0;
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		struct drm_gem_object *obj = &obj_priv->base;

		if (batchbuffer[0] == NULL &&
		    bbaddr >= obj_priv->gtt_offset &&
		    bbaddr < obj_priv->gtt_offset + obj->size)
			batchbuffer[0] = obj;

		if (batchbuffer[1] == NULL &&
		    error->acthd >= obj_priv->gtt_offset &&
		    error->acthd < obj_priv->gtt_offset + obj->size)
			batchbuffer[1] = obj;

		count++;
	}
	/* Scan the other lists for completeness for those bizarre errors. */
	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
			struct drm_gem_object *obj = &obj_priv->base;

			if (batchbuffer[0] == NULL &&
			    bbaddr >= obj_priv->gtt_offset &&
			    bbaddr < obj_priv->gtt_offset + obj->size)
				batchbuffer[0] = obj;

			if (batchbuffer[1] == NULL &&
			    error->acthd >= obj_priv->gtt_offset &&
			    error->acthd < obj_priv->gtt_offset + obj->size)
				batchbuffer[1] = obj;

			if (batchbuffer[0] && batchbuffer[1])
				break;
		}
	}
	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
			struct drm_gem_object *obj = &obj_priv->base;

			if (batchbuffer[0] == NULL &&
			    bbaddr >= obj_priv->gtt_offset &&
			    bbaddr < obj_priv->gtt_offset + obj->size)
				batchbuffer[0] = obj;

			if (batchbuffer[1] == NULL &&
			    error->acthd >= obj_priv->gtt_offset &&
			    error->acthd < obj_priv->gtt_offset + obj->size)
				batchbuffer[1] = obj;

			if (batchbuffer[0] && batchbuffer[1])
				break;
		}
	}

	/* We need to copy these to an anonymous buffer as the simplest
	 * method to avoid being overwritten by userspace.
	 */
	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
	if (batchbuffer[1] != batchbuffer[0])
		error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
	else
		error->batchbuffer[1] = NULL;

	/* Record the ringbuffer */
	error->ringbuffer = i915_error_object_create(dev,
			dev_priv->render_ring.gem_object);

	/* Record buffers on the active list. */
	error->active_bo = NULL;
	error->active_bo_count = 0;

	if (count)
		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
					   GFP_ATOMIC);

	if (error->active_bo) {
		int i = 0;
		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
			struct drm_gem_object *obj = &obj_priv->base;

			error->active_bo[i].size = obj->size;
			error->active_bo[i].name = obj->name;
			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
			error->active_bo[i].read_domains = obj->read_domains;
			error->active_bo[i].write_domain = obj->write_domain;
			error->active_bo[i].fence_reg = obj_priv->fence_reg;
			error->active_bo[i].pinned = 0;
			if (obj_priv->pin_count > 0)
				error->active_bo[i].pinned = 1;
			if (obj_priv->user_pin_count > 0)
				error->active_bo[i].pinned = -1;
			error->active_bo[i].tiling = obj_priv->tiling_mode;
			error->active_bo[i].dirty = obj_priv->dirty;
			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;

			if (++i == count)
				break;
		}
		error->active_bo_count = i;
	}

	do_gettimeofday(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);

	spin_lock_irqsave(&dev_priv->error_lock, flags);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	spin_unlock_irqrestore(&dev_priv->error_lock, flags);

	if (error)
		i915_error_state_free(dev, error);
}
void i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	spin_lock(&dev_priv->error_lock);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	spin_unlock(&dev_priv->error_lock);

	if (error)
		i915_error_state_free(dev, error);
}
#else
#define i915_capture_error_state(x)
#endif
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);

	if (!eir)
		return;

	printk(KERN_ERR "render error detected, EIR: 0x%08x\n",
	       eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			printk(KERN_ERR "page table error\n");
			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			(void)I915_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		u32 pipea_stats = I915_READ(PIPEASTAT);
		u32 pipeb_stats = I915_READ(PIPEBSTAT);

		printk(KERN_ERR "memory refresh error\n");
		printk(KERN_ERR "PIPEASTAT: 0x%08x\n",
		       pipea_stats);
		printk(KERN_ERR "PIPEBSTAT: 0x%08x\n",
		       pipeb_stats);
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		printk(KERN_ERR "instruction error\n");
		printk(KERN_ERR "  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			(void)I915_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			printk(KERN_ERR "  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			printk(KERN_ERR "  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			printk(KERN_ERR "  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			printk(KERN_ERR "  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			printk(KERN_ERR "  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			printk(KERN_ERR "  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			(void)I915_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	(void)I915_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
static void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		INIT_COMPLETION(dev_priv->error_completion);
		atomic_set(&dev_priv->mm.wedged, 1);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		wake_up_all(&dev_priv->render_ring.irq_queue);
		if (HAS_BSD(dev))
			wake_up_all(&dev_priv->bsd_ring.irq_queue);
		if (HAS_BLT(dev))
			wake_up_all(&dev_priv->blt_ring.irq_queue);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}
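/*
 * When "wedged" is set, the waiters woken above observe mm.wedged and back
 * out with an error instead of re-sleeping; i915_error_work_func() then
 * resets the chip, clears mm.wedged and completes error_completion so
 * blocked callers can retry.
 */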
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj_priv;
	struct intel_unpin_work *work;
	unsigned long flags;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work == NULL || work->pending || !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj_priv = to_intel_bo(work->pending_flip_obj);
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
		stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
	} else {
		int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
		stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
							crtc->y * crtc->fb->pitch +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_handler(dev);

	iir = I915_READ(IIR);

	if (INTEL_INFO(dev)->gen >= 4)
		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
	else
		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe a underrun\n");
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
				DRM_DEBUG_DRIVER("pipe b underrun\n");
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->render_ring);
		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
			notify_ring(dev, &dev_priv->bsd_ring);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 0);
		}

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 1);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 1);
		}

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
			if (!dev_priv->flip_pending_is_done) {
				i915_pageflip_stall_check(dev, 0);
				intel_finish_page_flip(dev, 0);
			}
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
			if (!dev_priv->flip_pending_is_done) {
				i915_pageflip_stall_check(dev, 1);
				intel_finish_page_flip(dev, 1);
			}
		}

		if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
		    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}
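/*
 * The four dwords emitted above are the classic breadcrumb: the counter is
 * stored into the hardware status page at I915_BREADCRUMB_INDEX (which is
 * what READ_BREADCRUMB() reads back), and MI_USER_INTERRUPT then raises the
 * user interrupt that wakes i915_wait_irq() below.
 */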
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	if (dev_priv->trace_irq_seqno == 0)
		render_ring->user_irq_get(render_ring);

	dev_priv->trace_irq_seqno = seqno;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	render_ring->user_irq_get(render_ring);
	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	render_ring->user_irq_put(render_ring);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}
/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
					    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

	return 0;
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (HAS_PCH_SPLIT(dev))
		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
					     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	else
		i915_disable_pipestat(dev_priv, pipe,
				      PIPE_VBLANK_INTERRUPT_ENABLE |
				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
void i915_enable_interrupt (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PCH_SPLIT(dev))
		intel_opregion_enable_asle(dev);
	dev_priv->irq_enabled = 1;
}
/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}
int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}
/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
				  ring->name,
				  ring->waiting_seqno,
				  ring->get_seqno(ring));
			wake_up_all(&ring->irq_queue);
			*err = true;
		}
		return true;
	}

	return false;
}
/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1;
	bool err = false;

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (i915_hangcheck_ring_idle(&dev_priv->render_ring, &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->bsd_ring, &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->blt_ring, &err)) {
		dev_priv->hangcheck_count = 0;
		if (err)
			goto repeat;
		return;
	}

	if (INTEL_INFO(dev)->gen < 4) {
		acthd = I915_READ(ACTHD);
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		acthd = I915_READ(ACTHD_I965);
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}

	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (dev_priv->hangcheck_count++ > 1) {
			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");

			if (!IS_GEN2(dev)) {
				/* Is the chip hanging on a WAIT_FOR_EVENT?
				 * If so we can simply poke the RB_WAIT bit
				 * and break the hang. This should work on
				 * all but the second generation chipsets.
				 */
				u32 tmp = I915_READ(PRB0_CTL);
				if (tmp & RING_WAIT) {
					I915_WRITE(PRB0_CTL, tmp);
					POSTING_READ(PRB0_CTL);
					goto repeat;
				}
			}

			i915_handle_error(dev, true);
			return;
		}
	} else {
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset timer case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
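/*
 * Put differently: a hang is only declared once ACTHD/INSTDONE have been
 * sampled unchanged on the third consecutive timer expiration
 * (hangcheck_count++ > 1), i.e. after roughly three DRM_I915_HANGCHECK_PERIOD
 * intervals without forward progress.
 */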
static void ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	(void) I915_READ(DEIER);

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	(void) I915_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	(void) I915_READ(SDEIER);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
	u32 hotplug_mask;

	dev_priv->irq_mask_reg = ~display_mask;
	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;

	/* should always can generate irq */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
	(void) I915_READ(DEIER);

	if (IS_GEN6(dev))
		render_mask =
			GT_PIPE_NOTIFY |
			GT_GEN6_BSD_USER_INTERRUPT |
			GT_BLT_USER_INTERRUPT;

	dev_priv->gt_irq_mask_reg = ~render_mask;
	dev_priv->gt_irq_enable_reg = render_mask;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
	if (IS_GEN6(dev)) {
		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
	}

	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
	(void) I915_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
			       SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT;
	} else {
		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
	}

	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
	dev_priv->pch_irq_enable_reg = hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
	(void) I915_READ(SDEIER);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_preinstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}
/*
 * Must be called after intel_modeset_init or hotplug interrupts won't be
 * enabled correctly.
 */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
	u32 error_mask;

	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
	if (HAS_BSD(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
	if (HAS_BLT(dev))
		DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	if (HAS_PCH_SPLIT(dev))
		return ironlake_irq_postinstall(dev);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	I915_WRITE(IER, enable_mask);
	(void) I915_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		/* Note HDMI and DP share bits */
		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;

			/* Programming the CRT detection parameters tends
			   to generate a spurious hotplug event about three
			   seconds later.  So just do it once.
			*/
			if (IS_G4X(dev))
				hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

	intel_opregion_enable_asle(dev);

	return 0;
}
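/*
 * EMR note for the error setup above: bits set in EMR prevent the matching
 * error conditions from ever being reported in EIR, so the mask is built by
 * clearing only the errors we do want to see (page table, memory refresh
 * and, on G4X, the CP/MEM privilege errors).
 */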
static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));
}
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	if (HAS_PCH_SPLIT(dev)) {
		ironlake_irq_uninstall(dev);
		return;
	}

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}