/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"
/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed".
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"
/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP	2
/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}
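
/*
 * Note: the value stored to the trigger page is ignored by the ESB
 * logic; any store fires the source. A hypothetical caller sketch,
 * for illustration only:
 *
 *	if (!xive_irq_trigger(&state->ipi_data))
 *		pr_devel("no trigger page, IPI not sent\n");
 */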
static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/* Since we have the no-EOI flag, the interrupt is effectively
	 * disabled now. Clearing xive_esc_on means we won't bother
	 * doing so on the next entry.
	 *
	 * This also allows the entry code to know that if a PQ combination
	 * of 10 is observed while xive_esc_on is true, it means the queue
	 * contains an unprocessed escalation interrupt. We don't make use of
	 * that knowledge today but might (see comment in book3s_hv_rmhandler.S)
	 */
	vcpu->arch.xive_esc_on = false;

	return IRQ_HANDLED;
}
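
/*
 * In short, the escalation path above: an EQ entry lands while the
 * VCPU is not dispatched, the HW escalates by firing this host
 * interrupt, and we flag irq_pending and kick the VCPU so the entry
 * code re-scans the XIVE queues on its way back into the guest.
 */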
static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (xc->xive->single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	/* In single escalation mode, we grab the ESB MMIO of the
	 * interrupt and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_NO_EOI flag which will prevent the
	 * core code from performing an EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (xc->xive->single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_NO_EOI;
	}

	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}
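
/*
 * Usage note: this is called from xive_check_provisioning() for each
 * newly provisioned priority (when not in single escalation mode) and
 * from kvmppc_xive_connect_vcpu(), which always attaches the prio 0
 * escalation so MFRR (IPI) notifications escalate in all modes.
 */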
static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve infos on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}
/* Called with kvm_lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&kvm->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			xive_attach_escalation(vcpu, prio);
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}
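
/*
 * qmap is a plain bitmap of provisioned priorities: after this
 * function succeeds for, say, prio 5, xive->qmap has bit 5 set
 * (0x20) and any later call for prio 5 returns immediately.
 */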
static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}
static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}
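
/*
 * Worked example, assuming a 64kB queue of 4-byte EQ entries: there
 * are 16384 slots, so q->msk = 16383 and max = 16384 - XIVE_Q_GAP =
 * 16382 interrupts can be accounted against this queue before the
 * atomic_add_unless() above fails and we return -EBUSY.
 */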
static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
}
static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
{
	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * If the interrupt is marked as needing masking via
	 * firmware, we do it here. Firmware masking however
	 * is "lossy", it won't return the old p and q bits
	 * and won't set the interrupt to a state where it will
	 * record queued ones. If this is an issue we should do
	 * lazy masking instead.
	 *
	 * For now, we work around this in unmask by forcing
	 * an interrupt whenever we unmask a non-LSI via FW
	 * (if ever).
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive_vp(xive, state->act_server),
					  MASKED, state->number);
		/* set old_p so we can track if an H_EOI was done */
		state->old_p = true;
		state->old_q = false;
	} else {
		/* Set PQ to 10, return old P and old Q and remember them */
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);

		/*
		 * Synchronize hardware to ensure the queues are updated
		 * when masking
		 */
		xive_native_sync_source(hw_num);
	}

	return old_prio;
}
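
/*
 * For reference, the ESB "PQ" pairs used above and below encode,
 * roughly:
 *
 *	PQ = 00: enabled, the next trigger is presented to a queue
 *	PQ = 01: P set, an occurrence was presented, EOI pending
 *	PQ = 10: masked/off, a new trigger only sets Q
 *	PQ = 11: masked with a trigger latched in Q
 */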
static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}
static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * See comment in xive_lock_and_mask() concerning masking
	 * via firmware.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  xive_vp(xive, state->act_server),
					  state->act_priority, state->number);
		/* If an EOI is needed, do it here */
		if (!state->old_p)
			xive_vm_source_eoi(hw_num, xd);
		/* If this is not an LSI, force a trigger */
		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);
		goto bail;
	}

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI,
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}
/*
 * Target an interrupt to a given server/prio, this will fallback
 * to another server if necessary and perform the HW targeting
 * updates as needed.
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);
	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 xive_vp(xive, server),
					 prio, state->number);
}
/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targeted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targeting
 *   from masking, we only handle accounting during (re)targeting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
 */
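
/*
 * A worked instance of these rules: an interrupt fires while masked,
 * so HW moves PQ from 10 to 11. On unmask, the saved Q is OR'ed in
 * (PQ stays 11) and, since saved P was clear, we do the effective
 * EOI: the latched occurrence is re-triggered into the target queue
 * rather than being lost.
 */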
int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED)
		rc = xive_check_provisioning(xive->kvm,
			      xive_prio_from_guest(priority));
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targeting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask() will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targeting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targeted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);

	/*
	 * Then we handle targeting.
	 *
	 * First calculate a new "actual priority"
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything.
	 *
	 * The condition for re-targeting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 *       also ff, we don't do anything and leave the interrupt
	 *       untargeted. An attempt of doing an int_on on an
	 *       untargeted interrupt will fail. If that is a problem
	 *       we could initialize interrupts with valid default
	 *       priorities.
	 */
	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally, update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}
int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}
int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targeted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargeted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}
int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}
static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];

	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}
u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}
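
/*
 * Layout of the value returned above, matching the XICS one_reg
 * encoding: CPPR at KVM_REG_PPC_ICP_CPPR_SHIFT, MFRR at
 * KVM_REG_PPC_ICP_MFRR_SHIFT, and PPRI forced to 0xff since XIVE has
 * no "1 element" XIRR queue to report a pending priority from.
 */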
int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen because the vcpu->mutex makes running a
	 * vcpu mutually exclusive with doing one_reg get/set on it.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If it's not 0xff, we mark the VCPU as
	 * having a pending MFRR change, which will re-evaluate the
	 * target. The VCPU will thus potentially get a spurious
	 * interrupt but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been setup yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}

	return 0;
}
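
/*
 * This path is typically reached via the KVM_REG_PPC_ICP_STATE
 * one_reg from userspace on migration restore; the XIVE device
 * reconstructs the XICS-visible ICP state from it.
 */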
int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after routing it to the guest with a simple EOI.
	 *
	 * The "state" argument is a "token", all it needs is to be
	 * non-NULL to switch to passed-through or NULL for the
	 * other way around. We may not yet have an actual VCPU
	 * target here and we don't really care.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targeted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue thus we have to wait for a guest
	 * originated EOI
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear old_p/old_q as they are no longer relevant */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
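
/*
 * Summary of the pass-through setup above: mark the host IRQ as
 * VCPU-owned, hard-mask the IPI (PQ=01), record the HW irq in
 * state->pt_*, mirror the IPI's routing onto it, then EOI if the
 * guest had it unmasked so it can fire straight into the guest
 * queue.
 */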
int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Clean it up */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}
}
void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	int i;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Free the queues & associated interrupts */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
		/* Free the queue */
		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}
	/* Free the VP */
	kfree(xc);
}
int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
			     struct kvm_vcpu *vcpu, u32 cpu)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc;
	int i, r = -EBUSY;

	pr_devel("connect_vcpu(cpu=%d)\n", cpu);

	if (dev->ops != &kvm_xive_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type)
		return -EBUSY;
	if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
		pr_devel("Duplicate !\n");
		return -EEXIST;
	}
	if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
		pr_devel("Out of bounds !\n");
		return -EINVAL;
	}
	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc)
		return -ENOMEM;

	/* We need to synchronize with queue provisioning */
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = cpu;
	xc->vp_id = xive_vp(xive, cpu);
	xc->mfrr = 0xff;
	xc->valid = true;

	r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (r)
		goto bail;

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* Allocate IPI */
	xc->vp_ipi = xive_native_alloc_irq();
	if (!xc->vp_ipi) {
		pr_err("Failed to allocate xive irq for VCPU IPI\n");
		r = -EIO;
		goto bail;
	}
	pr_devel(" IPI=0x%x\n", xc->vp_ipi);

	r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
	if (r)
		goto bail;

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect escalation interrupts numbering
	 */
	r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (r) {
		pr_err("Failed to enable VP in OPAL, err %d\n", r);
		goto bail;
	}

	/*
	 * Initialize queues. Initially we set them all for no queueing
	 * and we enable escalation for queue 0 only which we'll use for
	 * our mfrr change notifications. If the VCPU is hot-plugged, we
	 * do handle provisioning however based on the existing "map"
	 * of enabled queues.
	 */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		/* Single escalation, no queue 7 */
		if (i == 7 && xive->single_escalation)
			break;

		/* Is queue already enabled ? Provision it */
		if (xive->qmap & (1 << i)) {
			r = xive_provision_queue(vcpu, i);
			if (r == 0 && !xive->single_escalation)
				xive_attach_escalation(vcpu, i);
			if (r)
				goto bail;
		} else {
			r = xive_native_configure_queue(xc->vp_id,
							q, i, NULL, 0, true);
			if (r) {
				pr_err("Failed to configure queue %d for VCPU %d\n",
				       i, cpu);
				goto bail;
			}
		}
	}

	/* If not done above, attach priority 0 escalation */
	r = xive_attach_escalation(vcpu, 0);
	if (r)
		goto bail;

	/* Route the IPI */
	r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
	if (!r)
		xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);

bail:
	mutex_unlock(&vcpu->kvm->lock);
	if (r) {
		kvmppc_xive_cleanup_vcpu(vcpu);
		return r;
	}

	vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
	return 0;
}
/*
 * Scanning of queues before/after migration save
 */
static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return;

	state = &sb->irq_state[idx];

	/* Some sanity checking */
	if (!state->valid) {
		pr_err("invalid irq 0x%x in cpu queue!\n", irq);
		return;
	}

	/*
	 * If the interrupt is in a queue it should have P set.
	 * We warn so that gets reported. A backtrace isn't useful
	 * so no need to use a WARN_ON.
	 */
	if (!state->saved_p)
		pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);

	/* Set flag */
	state->in_queue = true;
}
static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
				   struct kvmppc_xive_src_block *sb,
				   u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/* Mask and save state, this will also sync HW queues */
	state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);

	/* Transfer P and Q */
	state->saved_p = state->old_p;
	state->saved_q = state->old_q;

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}
static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
				     struct kvmppc_xive_src_block *sb,
				     u32 irq)
{
	struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];

	if (!state->valid)
		return;

	/*
	 * Lock / exclude EOI (not technically necessary if the
	 * guest isn't running concurrently. If this becomes a
	 * performance issue we can probably remove the lock.)
	 */
	xive_lock_for_unmask(sb, state);

	/* Restore mask/prio if it wasn't masked */
	if (state->saved_scan_prio != MASKED)
		xive_finish_unmask(xive, sb, state, state->saved_scan_prio);

	/* Unlock */
	arch_spin_unlock(&sb->lock);
}
static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
{
	u32 idx = q->idx;
	u32 toggle = q->toggle;
	u32 irq;

	do {
		irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
		if (irq > XICS_IPI)
			xive_pre_save_set_queued(xive, irq);
	} while (irq);
}
static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
	struct kvm_vcpu *vcpu = NULL;
	int i, j;

	/*
	 * See comment in xive_get_source() about how this
	 * work. Collect a stable state for all interrupts
	 */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_mask_irq(xive, sb, j);
	}

	/* Then scan the queues and update the "in_queue" flag */
	kvm_for_each_vcpu(i, vcpu, xive->kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		if (!xc)
			continue;
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			if (xc->queues[j].qpage)
				xive_pre_save_queue(xive, &xc->queues[j]);
		}
	}

	/* Finally restore interrupt states */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			xive_pre_save_unmask_irq(xive, sb, j);
	}
}
static void xive_post_save_scan(struct kvmppc_xive *xive)
{
	int i, j;

	/* Clear all the in_queue flags */
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
			sb->irq_state[j].in_queue = false;
	}

	/* Next get_source() will do a new scan */
	xive->saved_src_count = 0;
}
/*
 * This returns the source configuration and state to user space.
 */
static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val, prio;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[idx];

	if (!state->valid)
		return -ENOENT;

	pr_devel("get_source(%ld)...\n", irq);

	/*
	 * So to properly save the state into something that looks like a
	 * XICS migration stream we cannot treat interrupts individually.
	 *
	 * We need, instead, mask them all (& save their previous PQ state)
	 * to get a stable state in the HW, then sync them to ensure that
	 * any interrupt that had already fired hits its queue, and finally
	 * scan all the queues to collect which interrupts are still present
	 * in the queues, so we can set the "pending" flag on them and
	 * they can be resent on restore.
	 *
	 * So we do it all when the "first" interrupt gets saved, all the
	 * state is collected at that point, the rest of xive_get_source()
	 * will merely collect and convert that state to the expected
	 * userspace bit mask.
	 */
	if (xive->saved_src_count == 0)
		xive_pre_save_scan(xive);
	xive->saved_src_count++;

	/* Convert saved state into something compatible with xics */
	val = state->act_server;
	prio = state->saved_scan_prio;

	if (prio == MASKED) {
		val |= KVM_XICS_MASKED;
		prio = state->saved_priority;
	}
	val |= prio << KVM_XICS_PRIORITY_SHIFT;
	if (state->lsi) {
		val |= KVM_XICS_LEVEL_SENSITIVE;
		if (state->saved_p)
			val |= KVM_XICS_PENDING;
	} else {
		if (state->saved_p)
			val |= KVM_XICS_PRESENTED;

		if (state->saved_q)
			val |= KVM_XICS_QUEUED;

		/*
		 * We mark it pending (which will attempt a re-delivery)
		 * if we are in a queue *or* we were masked and had
		 * Q set which is equivalent to the XICS "masked pending"
		 * state
		 */
		if (state->in_queue || (prio == MASKED && state->saved_q))
			val |= KVM_XICS_PENDING;
	}

	/*
	 * If that was the last interrupt saved, reset the
	 * in_queue flags
	 */
	if (xive->saved_src_count == xive->src_count)
		xive_post_save_scan(xive);

	/* Copy the result to userspace */
	if (put_user(val, ubufp))
		return -EFAULT;

	return 0;
}
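
/*
 * Example of the u64 handed back above: the server number sits in the
 * low bits (KVM_XICS_DESTINATION_MASK), the priority at
 * KVM_XICS_PRIORITY_SHIFT, plus the KVM_XICS_MASKED,
 * _LEVEL_SENSITIVE, _PENDING, _PRESENTED and _QUEUED flags; this is
 * the same encoding xive_set_source() parses on restore.
 */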
static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive,
							   int irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvmppc_xive_src_block *sb;
	int i, bid;

	bid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* block already exists - somebody else got here first */
	if (xive->src_blocks[bid])
		goto out;

	/* Create the ICS */
	sb = kzalloc(sizeof(*sb), GFP_KERNEL);
	if (!sb)
		goto out;

	sb->id = bid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
		sb->irq_state[i].guest_priority = MASKED;
		sb->irq_state[i].saved_priority = MASKED;
		sb->irq_state[i].act_priority = MASKED;
	}
	smp_wmb();
	xive->src_blocks[bid] = sb;

	if (bid > xive->max_sbid)
		xive->max_sbid = bid;

out:
	mutex_unlock(&kvm->lock);
	return xive->src_blocks[bid];
}
static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu = NULL;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		if (xc->delayed_irq == irq) {
			xc->delayed_irq = 0;
			xive->delayed_irqs--;
			return true;
		}
	}
	return false;
}
static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 act_prio, guest_prio;
	u32 server;
	int rc = 0;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	pr_devel("set_source(irq=0x%lx)\n", irq);

	/* Find the source */
	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_devel("No source, creating source block...\n");
		sb = xive_create_src_block(xive, irq);
		if (!sb) {
			pr_devel("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	/* Read user passed data */
	if (get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargeted. It means that an interrupt
	 * can become "untargeted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargeted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertized to a running guest yet
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->kvm->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->kvm->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targeting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel(" Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
	 */
	if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
		state->old_p = true;
	if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
		state->old_q = true;

	pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);

	/*
	 * If the interrupt was unmasked, update guest priority and
	 * perform the appropriate state transition and do a
	 * re-trigger if necessary.
	 */
	if (val & KVM_XICS_MASKED) {
		pr_devel(" masked, saving prio\n");
		state->guest_priority = MASKED;
		state->saved_priority = guest_prio;
	} else {
		pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
		xive_finish_unmask(xive, sb, state, guest_prio);
		state->saved_priority = guest_prio;
	}

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	return 0;
}
int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;

	/* Perform locklessly .... (we need to do some RCUisms here...) */
	state = &sb->irq_state[idx];
	if (!state->valid)
		return -EINVAL;

	/* We don't allow a trigger on a passed-through interrupt */
	if (state->pt_number)
		return -EINVAL;

	if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
		state->asserted = 1;
	else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
		state->asserted = 0;
		return 0;
	}

	/* Trigger the IPI */
	xive_irq_trigger(&state->ipi_data);

	return 0;
}
static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}
static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}
static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}
static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
	xive_cleanup_irq_data(xd);
}
static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}
static void kvmppc_xive_free(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	int i;

	debugfs_remove(xive->dentry);

	if (kvm)
		kvm->arch.xive = NULL;

	/* Mask and free interrupts */
	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	kfree(xive);
	kfree(dev);
}
static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;
	int ret = 0;

	pr_devel("Creating xive for partition\n");

	xive = kzalloc(sizeof(*xive), GFP_KERNEL);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;

	/* Already there ? */
	if (kvm->arch.xive)
		ret = -EEXIST;
	else
		kvm->arch.xive = xive;

	/* We use the default queue size set by the host */
	xive->q_order = xive_native_default_eq_shift();
	if (xive->q_order < PAGE_SHIFT)
		xive->q_page_order = 0;
	else
		xive->q_page_order = xive->q_order - PAGE_SHIFT;

	/* Allocate a bunch of VPs */
	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
	pr_devel("VP_Base=%x\n", xive->vp_base);

	if (xive->vp_base == XIVE_INVALID_VP)
		ret = -ENOMEM;

	xive->single_escalation = xive_native_has_single_escalation();

	if (ret) {
		kfree(xive);
		return ret;
	}

	return 0;
}
static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_printf(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int j;

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x CPPR:%#x HWCPPR:%#x"
			   " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);

		/* Use a separate index so we don't clobber the vcpu iterator */
		for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
			struct xive_q *q = &xc->queues[j];
			u32 i0, i1, idx;

			if (!q->qpage && !xc->esc_virq[j])
				continue;

			seq_printf(m, " [q%d]: ", j);

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1);
			}
			if (xc->esc_virq[j]) {
				struct irq_data *d = irq_get_irq_data(xc->esc_virq[j]);
				struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
				u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

				seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
					   (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
					   (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
					   xc->esc_virq[j], pq, xd->eoi_page);
				seq_printf(m, "\n");
			}
		}

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_printf(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR  R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR  R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI   R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI   R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	return 0;
}
static int xive_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xive_debug_show, inode->i_private);
}

static const struct file_operations xive_debug_fops = {
	.open = xive_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}
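
/*
 * The resulting file typically shows up as
 * /sys/kernel/debug/powerpc/kvm-xive-<address> and can simply be read
 * (e.g. with cat) to dump per-VCPU and per-queue state while the
 * guest is running.
 */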
static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}
struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.destroy = kvmppc_xive_free,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};
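
/*
 * This ops table is registered elsewhere (when the host uses XIVE)
 * under the XICS device type, so userspace creates it through the
 * standard KVM_CREATE_DEVICE ioctl and existing XICS-aware VMMs keep
 * working unmodified.
 */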
void kvmppc_xive_init_module(void)
{
	__xive_vm_h_xirr = xive_vm_h_xirr;
	__xive_vm_h_ipoll = xive_vm_h_ipoll;
	__xive_vm_h_ipi = xive_vm_h_ipi;
	__xive_vm_h_cppr = xive_vm_h_cppr;
	__xive_vm_h_eoi = xive_vm_h_eoi;
}
void kvmppc_xive_exit_module(void)
{
	__xive_vm_h_xirr = NULL;
	__xive_vm_h_ipoll = NULL;
	__xive_vm_h_ipi = NULL;
	__xive_vm_h_cppr = NULL;
	__xive_vm_h_eoi = NULL;
}