/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	wait = b->irq_wait;
	if (wait) {
		result = ENGINE_WAKEUP_WAITER;
		if (wake_up_process(wait->tsk))
			result |= ENGINE_WAKEUP_ASLEEP;
	}

	return result;
}
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
}
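
/* Callers use the returned ENGINE_WAKEUP_* flags to distinguish "there was
 * a waiter" (ENGINE_WAKEUP_WAITER) from "there was a waiter and it had to
 * be woken from sleep" (ENGINE_WAKEUP_ASLEEP). For example, the hangcheck
 * below treats an asleep bottom-half as evidence of a missed interrupt:
 *
 *	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
 *		missed_breadcrumb(engine);
 */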
static unsigned long wait_timeout(void)
{
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
	DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s\n",
			 engine->name, __builtin_return_address(0),
			 yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
					&engine->irq_posted)));

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!b->irq_armed)
		return;

	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
		mod_timer(&b->hangcheck, wait_timeout());
		return;
	}

	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g., low priority task on a
	 * loaded system) and wait until it sleeps before declaring a missed
	 * interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
}
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
	 * Here the worker will wake up every jiffie in order to kick the
	 * oldest waiter to do the coherent seqno check.
	 */

	spin_lock_irq(&b->irq_lock);
	if (!__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);
	if (!b->irq_armed)
		return;

	mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
}
static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
}
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	b->irq_armed = false;
}
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n, *first;

	if (!b->irq_armed)
		return;

	/* We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */

	spin_lock_irq(&b->rb_lock);

	spin_lock(&b->irq_lock);
	first = fetch_and_zero(&b->irq_wait);
	__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		RB_CLEAR_NODE(&wait->node);
		if (wake_up_process(wait->tsk) && wait == first)
			missed_breadcrumb(engine);
	}
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/* Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}
static void enable_fake_irq(struct intel_breadcrumbs *b)
{
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
}
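
/* A summary of the two timers (restating the comments above, not new
 * behaviour): while the breadcrumb irq is armed, the hangcheck timer fires
 * after DRM_I915_HANGCHECK_JIFFIES [1.5s] and merely verifies that
 * interrupts are still arriving, whereas the fake_irq timer fires every
 * jiffie and stands in for the user interrupt by kicking the oldest waiter
 * into doing the coherent seqno check itself.
 */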
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	lockdep_assert_held(&b->irq_lock);
	if (b->irq_armed)
		return;

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */
	b->irq_armed = true;
	GEM_BUG_ON(b->irq_enabled);

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt,
		 */
		return;
	}

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, we can forgo holding our own wakref
	 * for the interrupt as before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	enable_fake_irq(b);
}
static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return rb_entry(node, struct intel_wait, node);
}
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task. N.B. when the
	 * task wakes up, it will find the empty rb_node, discern that it
	 * has already been removed from the tree and skip the serialisation
	 * of the b->rb_lock and b->irq_lock. This means that the destruction
	 * of the intel_wait is not serialised with the interrupt handler
	 * by the waiter - it must instead be serialised by the caller.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(!b->irq_wait);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	if (next)
		wake_up_process(to_wait(next)->tsk);
}
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches since we hold the
	 * spinlock we know that the first_waiter must be delayed and can
	 * reduce some of the sequential wake up latency if we take action
	 * ourselves and wake up the completed tasks in parallel. Also, by
	 * removing stale elements in the tree, we may be able to reduce the
	 * ping-pong between the old bottom-half and ourselves as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);

	if (first) {
		spin_lock(&b->irq_lock);
		b->irq_wait = wait;
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
		spin_unlock(&b->irq_lock);
	}

	if (completed) {
		/* Advance the bottom-half (b->irq_wait) before we wake up
		 * the waiters who may scribble over their intel_wait
		 * just as the interrupt handler is dereferencing it via
		 * b->irq_wait.
		 */
		if (!first) {
			struct rb_node *next = rb_next(completed);

			GEM_BUG_ON(next == &wait->node);
			__intel_breadcrumbs_next(engine, next);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);

			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

	return first;
}
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock_irq(&b->rb_lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);

	return first;
}
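
/* A minimal sketch of the waiter API from the caller's side, assuming the
 * intel_wait_init_for_seqno() helper from intel_ringbuffer.h. This is
 * illustrative only; the real consumer is i915_wait_request(), which also
 * handles timeouts, signals and GPU resets:
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init_for_seqno(&wait, seqno);
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine),
 *				      wait.seqno))
 *			break;
 *		schedule();
 *	}
 *	intel_engine_remove_wait(engine, &wait);
 *	__set_current_state(TASK_RUNNING);
 */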
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}
static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}
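
/* Note that a smaller task->prio means a higher priority. Returning INT_MIN
 * for the signaler means chain_wakeup() can never be true on its behalf:
 * the signaler never burns its own (realtime) timeslice waking herds of
 * completed waiters, and instead gets back to signalling fences.
 */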
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if have a small herd, they will wake up in parallel
			 * rather than sequentially, which should reduce
			 * the overall latency in waking all the completed
			 * clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
}
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node)) {
		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
		return;
	}

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
}
static bool signal_valid(const struct drm_i915_gem_request *request)
{
	return intel_wait_check_request(&request->signaling.wait, request);
}
static bool signal_complete(const struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is already completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return signal_valid(request);

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
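
/* sched_priority 1 is the minimum realtime priority: enough to preempt any
 * normal (SCHED_OTHER) task and so keep fence signalling latency low, while
 * still deferring to any other realtime work in the system.
 */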
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		bool do_schedule = true;

		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		rcu_read_lock();
		request = rcu_dereference(b->first_signal);
		if (request)
			request = i915_gem_request_get_rcu(request);
		rcu_read_unlock();
		if (signal_complete(request)) {
			local_bh_disable();
			dma_fence_signal(&request->fence);
			local_bh_enable(); /* kick start the tasklets */

			spin_lock_irq(&b->rb_lock);

			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			__intel_engine_remove_wait(engine,
						   &request->signaling.wait);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			if (request == rcu_access_pointer(b->first_signal)) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				rcu_assign_pointer(b->first_signal,
						   rb ? to_signaler(rb) : NULL);
			}
			rb_erase(&request->signaling.node, &b->signals);
			RB_CLEAR_NODE(&request->signaling.node);

			spin_unlock_irq(&b->rb_lock);

			/* Drop the tree's reference, taken by
			 * intel_engine_enable_signaling().
			 */
			i915_gem_request_put(request);

			/* If the engine is saturated we may be continually
			 * processing completed requests. This angers the
			 * NMI watchdog if we never let anything else
			 * have access to the CPU. Let's pretend to be nice
			 * and relinquish the CPU if we burn through the
			 * entire RT timeslice!
			 */
			do_schedule = need_resched();
		}

		if (unlikely(do_schedule)) {
			DEFINE_WAIT(exec);

			if (kthread_should_park())
				kthread_parkme();

			if (kthread_should_stop()) {
				GEM_BUG_ON(request);
				break;
			}

			if (request)
				add_wait_queue(&request->execute, &exec);

			schedule();

			if (request)
				remove_wait_queue(&request->execute, &exec);
		}
		/* Drop our local reference from i915_gem_request_get_rcu() */
		i915_gem_request_put(request);
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}
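
/* The signaler runs until intel_engine_fini_breadcrumbs() calls
 * kthread_stop(). The kthread_should_park()/kthread_parkme() handshake
 * above lets other parts of the driver quiesce this thread without tearing
 * it down (around engine reset, for example).
 */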
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;
	u32 seqno;

	/* Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);

	seqno = i915_gem_request_global_seqno(request);
	if (!seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.request = request;
	request->signaling.wait.seqno = seqno;
	i915_gem_request_get(request);

	spin_lock(&b->rb_lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(seqno,
				      to_signaler(parent)->signaling.wait.seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		rcu_assign_pointer(b->first_signal, request);

	spin_unlock(&b->rb_lock);

	if (wakeup)
		wake_up_process(b->signaler);
}
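
/* As noted above, this is reached via the irqsafe fence->lock, typically
 * from dma_fence_enable_sw_signaling(), e.g. (illustrative only):
 *
 *	dma_fence_enable_sw_signaling(&request->fence);
 *
 * which invokes the fence's .enable_signaling() callback, landing here
 * with request->lock held and interrupts disabled.
 */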
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);
	GEM_BUG_ON(!request->signaling.wait.seqno);

	spin_lock(&b->rb_lock);

	if (!RB_EMPTY_NODE(&request->signaling.node)) {
		if (request == rcu_access_pointer(b->first_signal)) {
			struct rb_node *rb =
				rb_next(&request->signaling.node);
			rcu_assign_pointer(b->first_signal,
					   rb ? to_signaler(rb) : NULL);
		}
		rb_erase(&request->signaling.node, &b->signals);
		RB_CLEAR_NODE(&request->signaling.node);
		i915_gem_request_put(request);
	}

	__intel_engine_remove_wait(engine, &request->signaling.wait);

	spin_unlock(&b->rb_lock);

	request->signaling.wait.seqno = 0;
}
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);
	setup_timer(&b->hangcheck,
		    intel_breadcrumbs_hangcheck,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->irq_lock);

	if (b->irq_enabled)
		irq_enable(engine);
	else
		irq_disable(engine);

	/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is currently
	 * idle (we haven't started it yet), there is no possibility for a
	 * missed interrupt as we enabled the irq and so we can clear the
	 * immediate wakeup (until a real interrupt arrives for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	if (b->irq_armed)
		enable_fake_irq(b);

	spin_unlock_irq(&b->irq_lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(rcu_access_pointer(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
}
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool busy = false;

	spin_lock_irq(&b->rb_lock);

	if (b->irq_wait) {
		wake_up_process(b->irq_wait->tsk);
		busy = true;
	}

	if (rcu_access_pointer(b->first_signal)) {
		wake_up_process(b->signaler);
		busy = true;
	}

	spin_unlock_irq(&b->rb_lock);

	return busy;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif