/*
 * interrupt.c - handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */
#include <asm/lowcore.h>
#include <asm/uaccess.h>
#include <linux/kvm_host.h>
#include "kvm-s390.h"
#include "gaccess.h"
static int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}
static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}
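
/*
 * Check whether a queued interrupt can currently be delivered to the
 * guest: external interrupts require the external mask bit in the guest
 * PSW and the matching subclass mask bit in guest control register 0.
 */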
static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	default:
		BUG();
	}
	return 0;
}
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_ECALL_PEND |
		CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		&vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
}
static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}
static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_SIGP_STOP:
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
		break;
	default:
		BUG();
	}
}
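
/*
 * Deliver one interrupt to the guest: write the interruption code and
 * parameters into the guest lowcore and perform the architected PSW
 * swap (store the old PSW, load the new one). A failing guest access
 * sets "exception" and results in an addressing program check below.
 */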
static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
				   struct kvm_s390_interrupt_info *inti)
{
	const unsigned short table[] = { 2, 4, 4, 6 };
	int rc, exception = 0;

	switch (inti->type) {
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
		vcpu->stat.deliver_emergency_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_SERVICE:
		VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
			   inti->ext.ext_params);
		vcpu->stat.deliver_service_signal++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_INT_VIRTIO:
		VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_EXT_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
				   inti->ext.ext_params2);
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_SIGP_STOP:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
		vcpu->stat.deliver_stop_signal++;
		__set_intercept_indicator(vcpu, inti);
		break;

	case KVM_S390_SIGP_SET_PREFIX:
		VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
			   inti->prefix.address);
		vcpu->stat.deliver_prefix_signal++;
		vcpu->arch.sie_block->prefix = inti->prefix.address;
		vcpu->arch.sie_block->ihcpu = 0xffff;
		break;

	case KVM_S390_RESTART:
		VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
		vcpu->stat.deliver_restart_signal++;
		rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
			restart_old_psw), &vcpu->arch.sie_block->gpsw,
			sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
			   inti->pgm.code,
			   table[vcpu->arch.sie_block->ipa >> 14]);
		vcpu->stat.deliver_program_int++;
		rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
		if (rc == -EFAULT)
			exception = 1;

		rc = put_guest_u16(vcpu, __LC_PGM_ILC,
			table[vcpu->arch.sie_block->ipa >> 14]);
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
			&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;

		rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
			__LC_PGM_NEW_PSW, sizeof(psw_t));
		if (rc == -EFAULT)
			exception = 1;
		break;

	default:
		BUG();
	}

	if (exception) {
		VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
			   " interrupt");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (inti->type == KVM_S390_PROGRAM_INT) {
			printk(KERN_WARNING "kvm: recursive program check\n");
			BUG();
		}
	}
}
static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
{
	int rc, exception = 0;

	if (psw_extint_disabled(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
		&vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
		__LC_EXT_NEW_PSW, sizeof(psw_t));
	if (rc == -EFAULT)
		exception = 1;
	if (exception) {
		VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
			   " ckc interrupt");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		return 0;
	}
	return 1;
}
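
/*
 * Returns nonzero if the vcpu has a deliverable local or floating
 * interrupt pending, or if an enabled clock comparator interrupt
 * is already due.
 */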
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	if (atomic_read(&li->active)) {
		spin_lock_bh(&li->lock);
		list_for_each_entry(inti, &li->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&li->lock);
	}

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock_bh(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock_bh(&fi->lock);
	}

	if ((!rc) && (vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch)) {
		if ((!psw_extint_disabled(vcpu)) &&
		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
			rc = 1;
	}

	return rc;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}
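
/*
 * Handle guest enabled wait: sleep until a local or floating interrupt
 * arrives, the clock comparator timer fires, or a signal is pending.
 * A wait with all interrupt classes disabled in the PSW can never be
 * resolved and is rejected with -ENOTSUPP.
 */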
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;
	DECLARE_WAITQUEUE(wait, current);

	vcpu->stat.exit_wait_state++;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;

	__set_cpu_idle(vcpu);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 0;
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		__unset_cpu_idle(vcpu);
		return -ENOTSUPP; /* disabled wait */
	}

	if (psw_extint_disabled(vcpu) ||
	    (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		goto no_timer;
	}

	now = get_clock() + vcpu->arch.sie_block->epoch;
	if (vcpu->arch.sie_block->ckc < now) {
		__unset_cpu_idle(vcpu);
		return 0;
	}

	sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;

	vcpu->arch.ckc_timer.expires = jiffies + sltime;

	add_timer(&vcpu->arch.ckc_timer);
	VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
no_timer:
	spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
	spin_lock_bh(&vcpu->arch.local_int.lock);
	add_wait_queue(&vcpu->arch.local_int.wq, &wait);
	while (list_empty(&vcpu->arch.local_int.list) &&
		list_empty(&vcpu->arch.local_int.float_int->list) &&
		(!vcpu->arch.local_int.timer_due) &&
		!signal_pending(current)) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&vcpu->arch.local_int.lock);
		spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
		spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
		spin_lock_bh(&vcpu->arch.local_int.lock);
	}
	__unset_cpu_idle(vcpu);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
	spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
	del_timer(&vcpu->arch.ckc_timer);
	return 0;
}
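
/*
 * Timer callback armed by kvm_s390_handle_wait(): mark the timer as
 * due and wake up the sleeping vcpu thread.
 */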
void kvm_s390_idle_wakeup(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	spin_lock_bh(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.timer_due = 1;
	if (waitqueue_active(&vcpu->arch.local_int.wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&vcpu->arch.local_int.lock);
}
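
/*
 * Deliver all deliverable pending interrupts: local interrupts first,
 * then the clock comparator, then floating interrupts. Interrupts that
 * cannot be delivered yet only get their intercept indicators set.
 */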
void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	int deliver;

	__reset_intercept_indicators(vcpu);
	if (atomic_read(&li->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&li->lock);
			list_for_each_entry_safe(inti, n, &li->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&li->list))
				atomic_set(&li->active, 0);
			spin_unlock_bh(&li->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}

	if ((vcpu->arch.sie_block->ckc <
		get_clock() + vcpu->arch.sie_block->epoch))
		__try_deliver_ckc_interrupt(vcpu);

	if (atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock_bh(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock_bh(&fi->lock);
			if (deliver) {
				__do_deliver_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (deliver);
	}
}
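
/*
 * Queue a program check on the vcpu's local interrupt list. Used by
 * in-kernel callers, hence no validity check on the interruption code.
 */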
int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_PROGRAM_INT;
	inti->pgm.code = code;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	spin_lock_bh(&li->lock);
	list_add(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	BUG_ON(waitqueue_active(&li->wq));
	spin_unlock_bh(&li->lock);
	return 0;
}
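
/*
 * Inject a floating interrupt and kick a vcpu to handle it: prefer an
 * idle vcpu from the idle mask, otherwise pick one round-robin via
 * next_rr_cpu.
 */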
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti;
	int sigcpu;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
			 s390int->parm, s390int->parm64);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->type = s390int->type;
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_INT_EMERGENCY:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock_bh(&fi->lock);
	list_add_tail(&inti->list, &fi->list);
	atomic_set(&fi->active, 1);
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (fi->local_int[sigcpu] == NULL);
	}
	li = fi->local_int[sigcpu];
	spin_lock_bh(&li->lock);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&li->wq);
	spin_unlock_bh(&li->lock);
	spin_unlock_bh(&fi->lock);
	mutex_unlock(&kvm->lock);
	return 0;
}
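
/*
 * Inject a vcpu-local interrupt on behalf of userspace. Program checks
 * are queued at the head of the list so they are delivered first.
 */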
int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
			 struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	switch (s390int->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000) {
			kfree(inti);
			return -EINVAL;
		}
		inti->type = s390int->type;
		inti->pgm.code = s390int->parm;
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   s390int->parm);
		break;
	case KVM_S390_SIGP_STOP:
	case KVM_S390_RESTART:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_INT_EMERGENCY:
		VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
		inti->type = s390int->type;
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	default:
		kfree(inti);
		return -EINVAL;
	}

	mutex_lock(&vcpu->kvm->lock);
	li = &vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (inti->type == KVM_S390_PROGRAM_INT)
		list_add(&inti->list, &li->list);
	else
		list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (inti->type == KVM_S390_SIGP_STOP)
		li->action_bits |= ACTION_STOP_ON_STOP;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(&li->wq))
		wake_up_interruptible(&vcpu->arch.local_int.wq);
	spin_unlock_bh(&li->lock);
	mutex_unlock(&vcpu->kvm->lock);
	return 0;
}